diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 45b648fa35..c08e003ea1 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -57,7 +57,7 @@ jobs: # For the sdist we should be as conservative as possible with our # Python version. This should be the lowest supported version. This # means that no unsupported syntax can sneak through. - python-version: '3.8' + python-version: '3.9' - name: Install pip build run: | @@ -107,11 +107,11 @@ jobs: matrix: os: [ubuntu-latest, windows-latest, macos-latest] env: - # Set up wheels matrix. This is CPython 3.8--3.10 for all OS targets. - CIBW_BUILD: "cp3{8,9,10,11}-*" + # Set up wheels matrix. This is CPython 3.9--3.12 for all OS targets. + CIBW_BUILD: "cp3{9,10,11,12}-*" # Numpy and SciPy do not supply wheels for i686 or win32 for # Python 3.10+, so we skip those: - CIBW_SKIP: "*-musllinux* cp3{8,9,10,11}-manylinux_i686 cp3{8,9,10,11}-win32" + CIBW_SKIP: "*-musllinux* cp3{10,11,12}-manylinux_i686 cp3{10,11,12}-win32" OVERRIDE_VERSION: ${{ github.event.inputs.override_version }} steps: @@ -121,7 +121,7 @@ jobs: name: Install Python with: # This is about the build environment, not the released wheel version. - python-version: '3.8' + python-version: '3.9' - name: Install cibuildwheel run: | @@ -165,12 +165,12 @@ jobs: - uses: actions/setup-python@v4 name: Install Python with: - python-version: '3.8' + python-version: '3.9' - name: Verify this is not a dev version shell: bash run: | - python -m pip install wheels/*-cp38-cp38-manylinux*.whl + python -m pip install wheels/*-cp39-cp39-manylinux*.whl python -c 'import qutip; print(qutip.__version__); assert "dev" not in qutip.__version__; assert "+" not in qutip.__version__' # We built the zipfile for convenience distributing to Windows users on @@ -198,12 +198,12 @@ jobs: - uses: actions/setup-python@v4 name: Install Python with: - python-version: '3.8' + python-version: '3.9' - name: Verify this is not a dev version shell: bash run: | - python -m pip install wheels/*-cp38-cp38-manylinux*.whl + python -m pip install wheels/*-cp39-cp39-manylinux*.whl python -c 'import qutip; print(qutip.__version__); assert "dev" not in qutip.__version__; assert "+" not in qutip.__version__' # We built the zipfile for convenience distributing to Windows users on diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index f44aa7e8f2..c046f02a4e 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -27,58 +27,51 @@ jobs: os: [ubuntu-latest] # Test other versions of Python in special cases to avoid exploding the # matrix size; make sure to test all supported versions in some form. - python-version: ["3.9"] + python-version: ["3.11"] case-name: [defaults] - numpy-requirement: [">=1.20,<1.21"] + numpy-requirement: [">=1.22"] + scipy-requirement: [">=1.8"] coverage-requirement: ["==6.5"] # Extra special cases. In these, the new variable defined should always # be a truth-y value (hence 'nomkl: 1' rather than 'mkl: 0'), because # the lack of a variable is _always_ false-y, and the defaults lack all # the special cases. include: - # Mac - # Mac has issues with MKL since september 2022. 
- - case-name: macos - os: macos-latest - python-version: "3.10" - condaforge: 1 - nomkl: 1 - - # Scipy 1.5 - - case-name: old SciPy + # Python 3.9, Scipy 1.7, numpy 1.22 + # On more version than suggested by SPEC 0 + # https://scientific-python.org/specs/spec-0000/ + # There are deprecation warnings when using cython 0.29.X + - case-name: Old setup os: ubuntu-latest - python-version: "3.8" - numpy-requirement: ">=1.20,<1.21" - scipy-requirement: ">=1.5,<1.6" + python-version: "3.9" + scipy-requirement: ">=1.8,<1.9" + numpy-requirement: ">=1.22,<1.23" condaforge: 1 oldcython: 1 pytest-extra-options: "-W ignore:dep_util:DeprecationWarning" - # No MKL runs. MKL is now the default for conda installations, but - # not necessarily for pip. - - case-name: no MKL - os: ubuntu-latest - python-version: "3.9" - numpy-requirement: ">=1.20,<1.21" - nomkl: 1 - - # Builds without Cython at runtime. This is a core feature; - # everything should be able to run this. - - case-name: no Cython - os: ubuntu-latest - python-version: "3.8" - nocython: 1 - - # Python 3.10 and numpy 1.22 - # Use conda-forge to provide numpy 1.22 - - case-name: Python 3.10 + # Python 3.10, no mkl, scipy 1.9, numpy 1.23 + # Scipy 1.9 did not support cython 3.0 yet. + # cython#17234 + - case-name: no mkl os: ubuntu-latest python-version: "3.10" + scipy-requirement: ">=1.9,<1.10" + numpy-requirement: ">=1.23,<1.24" condaforge: 1 oldcython: 1 + nomkl: 1 pytest-extra-options: "-W ignore:dep_util:DeprecationWarning" - # Python 3.11 and latest numpy + # Python 3.10, no cython, scipy 1.10, numpy 1.24 + - case-name: no cython + os: ubuntu-latest + python-version: "3.10" + scipy-requirement: ">=1.10,<1.11" + numpy-requirement: ">=1.24,<1.25" + nocython: 1 + + # Python 3.11 and recent numpy # Use conda-forge to provide Python 3.11 and latest numpy # Ignore deprecation of the cgi module in Python 3.11 that is # still imported by Cython.Tempita. This was addressed in @@ -88,8 +81,30 @@ jobs: os: ubuntu-latest python-version: "3.11" condaforge: 1 + scipy-requirement: ">=1.11,<1.12" + numpy-requirement: ">=1.25,<1.26" conda-extra-pkgs: "suitesparse" # for compiling cvxopt - pytest-extra-options: "-W ignore::DeprecationWarning:Cython.Tempita" + + # Python 3.12 and latest numpy + # Use conda-forge to provide Python 3.11 and latest numpy + - case-name: Python 3.12 + os: ubuntu-latest + python-version: "3.12" + scipy-requirement: ">=1.12,<1.13" + numpy-requirement: ">=1.26,<1.27" + condaforge: 1 + pytest-extra-options: "-W ignore:datetime:DeprecationWarning" + # Install mpi4py to test mpi_pmap + # Should be enough to include this in one of the runs + includempi: 1 + + # Mac + # Mac has issues with MKL since september 2022. + - case-name: macos + os: macos-latest + python-version: "3.11" + condaforge: 1 + nomkl: 1 # Windows. Once all tests pass without special options needed, this # can be moved to the main os list in the test matrix. All the tests @@ -97,13 +112,12 @@ jobs: # multiprocessing under the hood. Windows does not support fork() # well, which makes transfering objects to the child processes # error prone. See, e.g., https://github.com/qutip/qutip/issues/1202 - - case-name: Windows Latest + - case-name: Windows os: windows-latest - python-version: "3.10" + python-version: "3.11" steps: - uses: actions/checkout@v3 - - uses: conda-incubator/setup-miniconda@v2 with: auto-update-conda: true @@ -115,7 +129,7 @@ jobs: # rather than in the GitHub Actions file directly, because bash gives us # a proper programming language to use. 
run: | - QUTIP_TARGET="tests,graphics,semidefinite,ipython" + QUTIP_TARGET="tests,graphics,semidefinite,ipython,extras" if [[ -z "${{ matrix.nocython }}" ]]; then QUTIP_TARGET="$QUTIP_TARGET,runtime_compilation" fi @@ -135,6 +149,10 @@ jobs: if [[ -n "${{ matrix.conda-extra-pkgs }}" ]]; then conda install "${{ matrix.conda-extra-pkgs }}" fi + if [[ "${{ matrix.includempi }}" ]]; then + # Use openmpi because mpich causes problems. Note, environment variable names change in v5 + conda install "openmpi<5" mpi4py + fi python -m pip install -e .[$QUTIP_TARGET] python -m pip install "coverage${{ matrix.coverage-requirement }}" python -m pip install pytest-cov coveralls pytest-fail-slow @@ -166,6 +184,12 @@ jobs: # truly being executed. export QUTIP_NUM_PROCESSES=2 fi + if [[ "${{ matrix.includempi }}" ]]; then + # By default, the max. number of allowed worker processes in openmpi is + # (number of physical cpu cores) - 1. + # We only have 2 physical cores, but we want to test mpi_pmap with 2 workers. + export OMPI_MCA_rmaps_base_oversubscribe=true + fi pytest -Werror --strict-config --strict-markers --fail-slow=300 --durations=0 --durations-min=1.0 --verbosity=1 --cov=qutip --cov-report= --color=yes ${{ matrix.pytest-extra-options }} qutip/tests # Above flags are: # -Werror diff --git a/README.md b/README.md index 376e25b3a8..ba6dbf7c6c 100644 --- a/README.md +++ b/README.md @@ -48,7 +48,11 @@ Support [![Powered by NumFOCUS](https://img.shields.io/badge/powered%20by-NumFOCUS-orange.svg?style=flat&colorA=E1523D&colorB=007D8A)](https://numfocus.org) We are proud to be affiliated with [Unitary Fund](https://unitary.fund) and [numFOCUS](https://numfocus.org). -QuTiP development is supported by [Nori's lab](https://dml.riken.jp/) at RIKEN, by the University of Sherbrooke, and by Aberystwyth University, [among other supporting organizations](https://qutip.org/#supporting-organizations). + +We are grateful for [Nori's lab](https://dml.riken.jp/) at RIKEN and [Blais' lab](https://www.physique.usherbrooke.ca/blais/) at the Institut Quantique +for providing developer positions to work on QuTiP. + +We also thank Google for supporting us by financing GSoC students to work on the QuTiP as well as [other supporting organizations](https://qutip.org/#supporting-organizations) that have been supporting QuTiP over the years. Installation diff --git a/doc/QuTiP_tree_plot/qutip-structure.py b/doc/QuTiP_tree_plot/qutip-structure.py index 7234598fba..b112303b0f 100755 --- a/doc/QuTiP_tree_plot/qutip-structure.py +++ b/doc/QuTiP_tree_plot/qutip-structure.py @@ -37,7 +37,7 @@ ("#043c6b", {"settings", "configrc", "solver"}), # Visualisation ("#3f8fd2", { - "bloch", "bloch3d", "sphereplot", "orbital", "visualization", "wigner", + "bloch", "sphereplot", "orbital", "visualization", "wigner", "distributions", "tomography", "topology", }), # Operators diff --git a/doc/apidoc/classes.rst b/doc/apidoc/classes.rst index c6c3548a05..c8182bcb94 100644 --- a/doc/apidoc/classes.rst +++ b/doc/apidoc/classes.rst @@ -31,9 +31,6 @@ Bloch sphere .. autoclass:: qutip.bloch.Bloch :members: -.. autoclass:: qutip.bloch3d.Bloch3d - :members: - Distributions ------------- diff --git a/doc/apidoc/functions.rst b/doc/apidoc/functions.rst index c54628f1c4..f64707a415 100644 --- a/doc/apidoc/functions.rst +++ b/doc/apidoc/functions.rst @@ -41,7 +41,7 @@ Random Operators and States --------------------------- .. 
automodule:: qutip.random_objects - :members: rand_dm, rand_herm, rand_ket, rand_stochastic, rand_unitary, rand_super, rand_super_bcsz + :members: rand_dm, rand_herm, rand_ket, rand_stochastic, rand_unitary, rand_super, rand_super_bcsz, rand_kraus_map Superoperators and Liouvillians @@ -297,7 +297,7 @@ Parallelization --------------- .. automodule:: qutip.solver.parallel - :members: parallel_map, serial_map, loky_pmap + :members: parallel_map, serial_map, loky_pmap, mpi_pmap .. _functions-ipython: diff --git a/doc/biblio.rst b/doc/biblio.rst index 248e492fa4..1bdabce891 100644 --- a/doc/biblio.rst +++ b/doc/biblio.rst @@ -1,5 +1,5 @@ .. _biblo: - + Bibliography ============ @@ -14,7 +14,7 @@ Bibliography .. The trick with |text|_ is to get an italic link, and is described in the Docutils FAQ at https://docutils.sourceforge.net/FAQ.html#is-nested-inline-markup-possible. - + .. |theory-qi| replace:: *Theory of Quantum Information* .. _theory-qi: https://cs.uwaterloo.ca/~watrous/TQI-notes/ @@ -39,10 +39,10 @@ Bibliography .. [WBC11] C. Wood, J. Biamonte, D. G. Cory, *Tensor networks and graphical calculus for open quantum systems*. :arxiv:`1111.6950` - + .. [dAless08] D. d’Alessandro, *Introduction to Quantum Control and Dynamics*, (Chapman & Hall/CRC, 2008). - + .. [Byrd95] R. H. Byrd, P. Lu, J. Nocedal, and C. Zhu, *A Limited Memory Algorithm for Bound Constrained Optimization*, SIAM J. Sci. Comput. **16**, 1190 (1995). :doi:`10.1137/0916069` @@ -51,19 +51,16 @@ Bibliography .. [Lloyd14] S. Lloyd and S. Montangero, *Information theoretical analysis of quantum optimal control*, Phys. Rev. Lett. **113**, 010502 (2014). :doi:`10.1103/PhysRevLett.113.010502` - + .. [Doria11] P. Doria, T. Calarco & S. Montangero, *Optimal Control Technique for Many-Body Quantum Dynamics*, Phys. Rev. Lett. **106**, 190501 (2011). :doi:`10.1103/PhysRevLett.106.190501` - + .. [Caneva11] T. Caneva, T. Calarco, & S. Montangero, *Chopped random-basis quantum optimization*, Phys. Rev. A **84**, 022326 (2011). :doi:`10.1103/PhysRevA.84.022326` - + .. [Rach15] N. Rach, M. M. Müller, T. Calarco, and S. Montangero, *Dressing the chopped-random-basis optimization: A bandwidth-limited access to the trap-free landscape*, Phys. Rev. A. **92**, 062343 (2015). :doi:`10.1103/PhysRevA.92.062343` -.. [DYNAMO] - S. Machnes, U. Sander, S. J. Glaser, P. De Fouquieres, A. Gruslys, S. Schirmer, and T. Schulte-Herbrueggen, *Comparing, Optimising and Benchmarking Quantum Control Algorithms in a Unifying Programming Framework*, Phys. Rev. A. **84**, 022305 (2010). :arxiv:`1011.4874` - .. [Wis09] Wiseman, H. M. & Milburn, G. J. *Quantum Measurement and Control*, (Cambridge University Press, 2009). @@ -76,4 +73,4 @@ Bibliography B. Donvil, P. Muratore-Ginanneschi, *Quantum trajectory framework for general time-local master equations*, Nat Commun **13**, 4140 (2022). :doi:`10.1038/s41467-022-31533-8`. .. [Abd19] - M. Abdelhafez, D. I. Schuster, J. Koch, *Gradient-based optimal control of open quantum systems using quantumtrajectories and automatic differentiation*, Phys. Rev. A **99**, 052327 (2019). :doi:`10.1103/PhysRevA.99.052327`. \ No newline at end of file + M. Abdelhafez, D. I. Schuster, J. Koch, *Gradient-based optimal control of open quantum systems using quantumtrajectories and automatic differentiation*, Phys. Rev. A **99**, 052327 (2019). :doi:`10.1103/PhysRevA.99.052327`. 
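Editor's note on the API listings changed above: the random-objects listing gains ``rand_kraus_map`` and the parallelization listing gains ``mpi_pmap``. A minimal sketch of using them follows; the ``mpi_pmap`` call is shown only in a comment because it additionally requires ``mpi4py`` and an MPI runtime, and its call pattern is assumed to mirror ``parallel_map``.

```python
import qutip
from qutip import rand_kraus_map
from qutip.solver.parallel import parallel_map

# A random CPTP map on a qubit, returned as a list of N**2 Kraus operators.
kraus_ops = rand_kraus_map(2)
print(len(kraus_ops), kraus_ops[0].shape)  # -> 4 (2, 2)

def purity(seed):
    """Purity of a random 4-level density matrix (toy workload)."""
    rho = qutip.rand_dm(4, seed=seed)
    return (rho * rho).tr()

values = parallel_map(purity, range(8))
# With mpi4py and an MPI runtime available, the assumed equivalent would be:
# from qutip.solver.parallel import mpi_pmap
# values = mpi_pmap(purity, range(8), map_kw={"num_cpus": 2})
```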
diff --git a/doc/changes/1974.bugfix b/doc/changes/1974.bugfix new file mode 100644 index 0000000000..f7b521a0bf --- /dev/null +++ b/doc/changes/1974.bugfix @@ -0,0 +1 @@ +Add the possibility to customize point colors as in V4 and fix point plot behavior for 'l' style \ No newline at end of file diff --git a/doc/changes/2296.feature b/doc/changes/2296.feature new file mode 100644 index 0000000000..8fed655652 --- /dev/null +++ b/doc/changes/2296.feature @@ -0,0 +1 @@ +Added mpi_pmap, which uses the mpi4py module to run computations in parallel through the MPI interface. \ No newline at end of file diff --git a/doc/changes/2303.feature b/doc/changes/2303.feature new file mode 100644 index 0000000000..db293b995f --- /dev/null +++ b/doc/changes/2303.feature @@ -0,0 +1 @@ +Only pre-compute density matricies if keep_runs_results is False \ No newline at end of file diff --git a/doc/changes/2306.misc b/doc/changes/2306.misc new file mode 100644 index 0000000000..eb9042599c --- /dev/null +++ b/doc/changes/2306.misc @@ -0,0 +1 @@ +Remove Bloch3D: redundant to Bloch \ No newline at end of file diff --git a/doc/changes/2311.misc b/doc/changes/2311.misc new file mode 100644 index 0000000000..d5712b0c02 --- /dev/null +++ b/doc/changes/2311.misc @@ -0,0 +1 @@ +Allow tests to run without matplotlib and ipython. \ No newline at end of file diff --git a/doc/changes/2313.misc b/doc/changes/2313.misc new file mode 100644 index 0000000000..8020b81549 --- /dev/null +++ b/doc/changes/2313.misc @@ -0,0 +1 @@ +Add too small step warnings in fixed dt SODE solver \ No newline at end of file diff --git a/doc/changes/2325.misc b/doc/changes/2325.misc new file mode 100644 index 0000000000..9b64dc7253 --- /dev/null +++ b/doc/changes/2325.misc @@ -0,0 +1 @@ +Add `dtype` to `Qobj` and `QobjEvo` \ No newline at end of file diff --git a/doc/guide/figures/bloch3d+data.png b/doc/guide/figures/bloch3d+data.png deleted file mode 100644 index 4214368d3b..0000000000 Binary files a/doc/guide/figures/bloch3d+data.png and /dev/null differ diff --git a/doc/guide/figures/bloch3d+points.png b/doc/guide/figures/bloch3d+points.png deleted file mode 100644 index 14a8030365..0000000000 Binary files a/doc/guide/figures/bloch3d+points.png and /dev/null differ diff --git a/doc/guide/figures/bloch3d-blank.png b/doc/guide/figures/bloch3d-blank.png deleted file mode 100644 index 7888a7a896..0000000000 Binary files a/doc/guide/figures/bloch3d-blank.png and /dev/null differ diff --git a/doc/guide/guide-basics.rst b/doc/guide/guide-basics.rst index 072b9a939d..1d17cf0976 100644 --- a/doc/guide/guide-basics.rst +++ b/doc/guide/guide-basics.rst @@ -1,8 +1,8 @@ .. _basics: -************************************ +*********************************** Basic Operations on Quantum Objects -************************************ +*********************************** .. _basics-first: @@ -179,6 +179,8 @@ Therefore, QuTiP includes predefined objects for a variety of states and operato +--------------------------+----------------------------+----------------------------------------+ | Identity | ``qeye(N)`` | N = number of levels in Hilbert space. | +--------------------------+----------------------------+----------------------------------------+ +| Identity-like | ``qeye_like(qobj)`` | qobj = Object to copy dimensions from. 
| ++--------------------------+----------------------------+----------------------------------------+ | Lowering (destruction) | ``destroy(N)`` | same as above | | operator | | | +--------------------------+----------------------------+----------------------------------------+ @@ -321,12 +323,43 @@ For the destruction operator above: False >>> q.data + Dia(shape=(4, 4), num_diag=1) + + +The ``data`` attribute returns a Qutip diagonal matrix. +``Qobj`` instances store their data in Qutip matrix format. +In the core qutip module, the ``Dense``, ``CSR`` and ``Dia`` formats are available, but other packages can add other formats. +For example, the ``qutip-jax`` module adds the ``Jax`` and ``JaxDia`` formats. +One can always access the underlying matrix as a numpy array using :meth:`.Qobj.full`. +It is also possible to access the underlying data in a common format using :meth:`.Qobj.data_as`. + +.. doctest:: [basics] + :options: +NORMALIZE_WHITESPACE + + >>> q.data_as("dia_matrix") <4x4 sparse matrix of type '' - with 3 stored elements in Compressed Sparse Row format> + with 3 stored elements (1 diagonals) in DIAgonal format> +Conversion between storage type is done using the :meth:`.Qobj.to` method. -The data attribute returns a message stating that the data is a sparse matrix. All ``Qobj`` instances store their data as a sparse matrix to save memory. To access the underlying dense matrix one needs to use the :func:`qutip.Qobj.full` function as described below. +.. doctest:: [basics] + :options: +NORMALIZE_WHITESPACE + + >>> q.to("CSR").data + CSR(shape=(4, 4), nnz=3) + + >>> q.to("CSR").data_as("CSR_matrix") + <4x4 sparse matrix of type '' + with 3 stored elements in Compressed Sparse Row format> + + +Note that :meth:`.Qobj.data_as` does not do the conversion. + +QuTiP will do conversion when needed to keep everything working in any format. +However these conversions could slow down computation and it is recommended to keep to one format family where possible. +For example, core QuTiP ``Dense`` and ``CSR`` work well together and binary operations between these formats is efficient. +However binary operations between ``Dense`` and ``Jax`` should be avoided since it is not always clear whether the operation will be executed by Jax (possibly on a GPU if present) or numpy. .. _basics-qobj-math: @@ -397,7 +430,7 @@ In addition, the logic operators "is equal" `==` and "is not equal" `!=` are als .. _basics-functions: Functions operating on Qobj class -================================== +================================= Like attributes, the quantum object class has defined functions (methods) that operate on ``Qobj`` class instances. For a general quantum object ``Q``: @@ -429,6 +462,8 @@ Like attributes, the quantum object class has defined functions (methods) that o +-----------------+-------------------------------+----------------------------------------+ | Groundstate | ``Q.groundstate()`` | Eigenval & eigket of Qobj groundstate. | +-----------------+-------------------------------+----------------------------------------+ +| Matrix inverse | ``Q.inv()`` | Matrix inverse of the Qobj. 
| ++-----------------+-------------------------------+----------------------------------------+ | Matrix Element | ``Q.matrix_element(bra,ket)`` | Matrix element | +-----------------+-------------------------------+----------------------------------------+ | Norm | ``Q.norm()`` | Returns L2 norm for states, | @@ -454,6 +489,8 @@ Like attributes, the quantum object class has defined functions (methods) that o +-----------------+-------------------------------+----------------------------------------+ | Trace | ``Q.tr()`` | Returns trace of quantum object. | +-----------------+-------------------------------+----------------------------------------+ +| Conversion | ``Q.to(dtype)`` | Convert the matrix format CSR / Dense. | ++-----------------+-------------------------------+----------------------------------------+ | Transform | ``Q.transform(inpt)`` | A basis transformation defined by | | | | matrix or list of kets 'inpt' . | +-----------------+-------------------------------+----------------------------------------+ diff --git a/doc/guide/guide-bloch.rst b/doc/guide/guide-bloch.rst index 976264320e..ad351a5f51 100644 --- a/doc/guide/guide-bloch.rst +++ b/doc/guide/guide-bloch.rst @@ -9,39 +9,28 @@ Plotting on the Bloch Sphere Introduction ============ -When studying the dynamics of a two-level system, it is often convenient to visualize the state of the system by plotting the state-vector or density matrix on the Bloch sphere. In QuTiP, we have created two different classes to allow for easy creation and manipulation of data sets, both vectors and data points, on the Bloch sphere. The :class:`qutip.bloch.Bloch` class, uses Matplotlib to render the Bloch sphere, where as :class:`qutip.bloch3d.Bloch3d` uses the Mayavi rendering engine to generate a more faithful 3D reconstruction of the Bloch sphere. +When studying the dynamics of a two-level system, it is often convenient to visualize the state of the system by plotting the state-vector or density matrix on the Bloch sphere. In QuTiP, there is a class to allow for easy creation and manipulation of data sets, both vectors and data points, on the Bloch sphere. .. _bloch-class: -The Bloch and Bloch3d Classes -============================= +The Bloch Class +=============== In QuTiP, creating a Bloch sphere is accomplished by calling either: .. plot:: - :context: + :context: reset b = qutip.Bloch() -which will load an instance of the :class:`qutip.bloch.Bloch` class, or using :: - - >>> b3d = qutip.Bloch3d() - -that loads the :class:`qutip.bloch3d.Bloch3d` version. Before getting into the details of these objects, we can simply plot the blank Bloch sphere associated with these instances via: +which will load an instance of the :class:`~qutip.bloch.Bloch` class. +Before getting into the details of these objects, we can simply plot the blank Bloch sphere associated with these instances via: .. plot:: :context: b.make_sphere() -or - -.. _image-blank3d: - -.. figure:: figures/bloch3d-blank.png - :width: 3.5in - :figclass: align-center - In addition to the ``show`` command, see the API documentation for :class:`~qutip.bloch.Bloch` for a full list of other available functions. As an example, we can add a single data point: @@ -87,14 +76,7 @@ In total, the code for constructing our Bloch sphere with one vector, one state, b.add_states(up) b.render() -where we have removed the extra ``show()`` commands. Replacing ``b=Bloch()`` with ``b=Bloch3d()`` in the above code generates the following 3D Bloch sphere. - -.. _image-bloch3ddata: - -.. 
figure:: figures/bloch3d+data.png - :width: 3.5in - :figclass: align-center - +where we have removed the extra ``show()`` commands. We can also plot multiple points, vectors, and states at the same time by passing list or arrays instead of individual elements. Before giving an example, we can use the `clear()` command to remove the current data from our Bloch sphere instead of creating a new instance: @@ -183,26 +165,9 @@ Now, the data points cycle through a variety of predefined colors. Now lets add b.add_points([xz, yz, zz]) # no 'm' b.render() -Again, the same plot can be generated using the :class:`qutip.bloch3d.Bloch3d` class by replacing ``Bloch`` with ``Bloch3d``: - -.. figure:: figures/bloch3d+points.png - :width: 3.5in - :figclass: align-center A more slick way of using this 'multi' color feature is also given in the example, where we set the color of the markers as a function of time. -Differences Between Bloch and Bloch3d -------------------------------------- -While in general the ``Bloch`` and ``Bloch3d`` classes are interchangeable, there are some important differences to consider when choosing between them. - -- The ``Bloch`` class uses Matplotlib to generate figures. As such, the data plotted on the sphere is in reality just a 2D object. In contrast the ``Bloch3d`` class uses the 3D rendering engine from VTK via mayavi to generate the sphere and the included data. In this sense the ``Bloch3d`` class is much more advanced, as objects are rendered in 3D leading to a higher quality figure. - -- Only the ``Bloch`` class can be embedded in a Matplotlib figure window. Thus if you want to combine a Bloch sphere with another figure generated in QuTiP, you can not use ``Bloch3d``. Of course you can always post-process your figures using other software to get the desired result. - -- Due to limitations in the rendering engine, the ``Bloch3d`` class does not support LaTeX for text. Again, you can get around this by post-processing. - -- The user customizable attributes for the ``Bloch`` and ``Bloch3d`` classes are not identical. Therefore, if you change the properties of one of the classes, these changes will cause an exception if the class is switched. - .. _bloch-config: @@ -270,67 +235,6 @@ At the end of the last section we saw that the colors and marker shapes of the d | b.zlpos | Position of z-axis labels | ``[1.2, -1.2]`` | +---------------+---------------------------------------------------------+-------------------------------------------------+ -Bloch3d Class Options ---------------------- - -The Bloch3d sphere is also customizable. Note however that the attributes for the ``Bloch3d`` class are not in one-to-one -correspondence to those of the ``Bloch`` class due to the different underlying rendering engines. Assuming ``b=Bloch3d()``: - -.. tabularcolumns:: | p{3cm} | p{7cm} | p{7cm} | - -.. cssclass:: table-striped - -+---------------+---------------------------------------------------------+---------------------------------------------+ -| Attribute | Function | Default Setting | -+===============+=========================================================+=============================================+ -| b.fig | User supplied Mayavi Figure instance. Set by ``fig`` | ``None`` | -| | keyword arg. 
| | -+---------------+---------------------------------------------------------+---------------------------------------------+ -| b.font_color | Color of fonts | ``'black'`` | -+---------------+---------------------------------------------------------+---------------------------------------------+ -| b.font_scale | Scale of fonts | 0.08 | -+---------------+---------------------------------------------------------+---------------------------------------------+ -| b.frame | Draw wireframe for sphere? | ``True`` | -+---------------+---------------------------------------------------------+---------------------------------------------+ -| b.frame_alpha | Transparency of wireframe | 0.05 | -+---------------+---------------------------------------------------------+---------------------------------------------+ -| b.frame_color | Color of wireframe | ``'gray'`` | -+---------------+---------------------------------------------------------+---------------------------------------------+ -| b.frame_num | Number of wireframe elements to draw | 8 | -+---------------+---------------------------------------------------------+---------------------------------------------+ -| b.frame_radius| Radius of wireframe lines | 0.005 | -+---------------+---------------------------------------------------------+---------------------------------------------+ -| b.point_color | List of colors for Bloch point markers to cycle through | ``['r', 'g', 'b', 'y']`` | -+---------------+---------------------------------------------------------+---------------------------------------------+ -| b.point_mode | Type of point markers to draw | ``'sphere'`` | -+---------------+---------------------------------------------------------+---------------------------------------------+ -| b.point_size | Size of points | 0.075 | -+---------------+---------------------------------------------------------+---------------------------------------------+ -| b.sphere_alpha| Transparency of Bloch sphere | 0.1 | -+---------------+---------------------------------------------------------+---------------------------------------------+ -| b.sphere_color| Color of Bloch sphere | ``'#808080'`` | -+---------------+---------------------------------------------------------+---------------------------------------------+ -| b.size | Sets size of figure window | ``[500, 500]`` (500x500 pixels) | -+---------------+---------------------------------------------------------+---------------------------------------------+ -| b.vector_color| List of colors for Bloch vectors to cycle through | ``['r', 'g', 'b', 'y']`` | -+---------------+---------------------------------------------------------+---------------------------------------------+ -| b.vector_width| Width of Bloch vectors | 3 | -+---------------+---------------------------------------------------------+---------------------------------------------+ -| b.view | Azimuthal and Elevation viewing angles | ``[45, 65]`` | -+---------------+---------------------------------------------------------+---------------------------------------------+ -| b.xlabel | Labels for x-axis | ``['|x>', '']`` +x and -x | -+---------------+---------------------------------------------------------+---------------------------------------------+ -| b.xlpos | Position of x-axis labels | ``[1.07, -1.07]`` | -+---------------+---------------------------------------------------------+---------------------------------------------+ -| b.ylabel | Labels for y-axis | ``['$y$', '']`` +y and -y | 
-+---------------+---------------------------------------------------------+---------------------------------------------+ -| b.ylpos | Position of y-axis labels | ``[1.07, -1.07]`` | -+---------------+---------------------------------------------------------+---------------------------------------------+ -| b.zlabel | Labels for z-axis | ``['|0>', '|1>']`` +z and -z | -+---------------+---------------------------------------------------------+---------------------------------------------+ -| b.zlpos | Position of z-axis labels | ``[1.07, -1.07]`` | -+---------------+---------------------------------------------------------+---------------------------------------------+ - These properties can also be accessed via the print command: .. doctest:: diff --git a/doc/guide/guide-control.rst b/doc/guide/guide-control.rst index f3c31ad7e9..e0769cd592 100644 --- a/doc/guide/guide-control.rst +++ b/doc/guide/guide-control.rst @@ -191,65 +191,10 @@ algorithm. Optimal Quantum Control in QuTiP ================================ -There are two separate implementations of optimal control inside QuTiP. The -first is an implementation of first order GRAPE, and is not further described -here, but there are the example notebooks. The second is referred to as Qtrl -(when a distinction needs to be made) as this was its name before it was -integrated into QuTiP. Qtrl uses the Scipy optimize functions to perform the -multi-variable optimisation, typically the L-BFGS-B method for GRAPE and -Nelder-Mead for CRAB. The GRAPE implementation in Qtrl was initially based on -the open-source package DYNAMO, which is a MATLAB implementation, and is -described in [DYNAMO]_. It has since been restructured and extended for -flexibility and compatibility within QuTiP. - -The rest of this section describes the Qtrl implementation and how to use it. - -Object Model - The Qtrl code is organised in a hierarchical object model in order to try and maximise configurability whilst maintaining some clarity. It is not necessary to understand the model in order to use the pulse optimisation functions, but it is the most flexible method of using Qtrl. If you just want to use a simple single function call interface, then jump to :ref:`pulseoptim-functions` - -.. figure:: figures/qtrl-code_object_model.png - :align: center - :width: 3.5in - - Qtrl code object model. - -The object's properties and methods are described in detail in the documentation, so that will not be repeated here. - -OptimConfig - The OptimConfig object is used simply to hold configuration parameters used by all the objects. Typically this is the subclass types for the other objects and parameters for the users specific requirements. The ``loadparams`` module can be used read parameter values from a configuration file. - -Optimizer - This acts as a wrapper to the ``Scipy.optimize`` functions that perform the work of the pulse optimisation algorithms. Using the main classes the user can specify which of the optimisation methods are to be used. There are subclasses specifically for the BFGS and L-BFGS-B methods. There is another subclass for using the CRAB algorithm. - -Dynamics - This is mainly a container for the lists that hold the dynamics generators, propagators, and time evolution operators in each timeslot. The combining of dynamics generators is also complete by this object. 
Different subclasses support a range of types of quantum systems, including closed systems with unitary dynamics, systems with quadratic Hamiltonians that have Gaussian states and symplectic transforms, and a general subclass that can be used for open system dynamics with Lindbladian operators. - -PulseGen - There are many subclasses of pulse generators that generate different types of pulses as the initial amplitudes for the optimisation. Often the goal cannot be achieved from all starting conditions, and then typically some kind of random pulse is used and repeated optimisations are performed until the desired infidelity is reached or the minimum infidelity found is reported. There is a specific subclass that is used by the CRAB algorithm to generate the pulses based on the basis coefficients that are being optimised. - -TerminationConditions - This is simply a convenient place to hold all the properties that will determine when the single optimisation run terminates. Limits can be set for number of iterations, time, and of course the target infidelity. - -Stats - Performance data are optionally collected during the optimisation. This object is shared to a single location to store, calculate and report run statistics. - -FidelityComputer - The subclass of the fidelity computer determines the type of fidelity measure. These are closely linked to the type of dynamics in use. These are also the most commonly user customised subclasses. - -PropagatorComputer - This object computes propagators from one timeslot to the next and also the propagator gradient. The options are using the spectral decomposition or Frechet derivative, as discussed above. - -TimeslotComputer - Here the time evolution is computed by calling the methods of the other computer objects. - -OptimResult - The result of a pulse optimisation run is returned as an object with properties for the outcome in terms of the infidelity, reason for termination, performance statistics, final evolution, and more. +The Quantum Control part of QuTiP has been moved to its own project. -.. _pulseoptim-functions: +The previously available implementation is now located in the `qutip-qtrl `_ module. If the ``qutip-qtrl`` package is installed, it can also be imported under the name ``qutip.control`` to ease porting code developed for QuTiP 4 to QuTiP 5. -Using the pulseoptim functions -============================== -The simplest method for optimising a control pulse is to call one of the functions in the ``pulseoptim`` module. This automates the creation and configuration of the necessary objects, generation of initial pulses, running the optimisation and returning the result. There are functions specifically for unitary dynamics, and also specifically for the CRAB algorithm (GRAPE is the default). The ``optimise_pulse`` function can in fact be used for unitary dynamics and / or the CRAB algorithm, the more specific functions simply have parameter names that are more familiar in that application. +A newer interface with upgraded capabilities is being developed in `qutip-qoc `_. -A semi-automated method is to use the ``create_optimizer_objects`` function to generate and configure all the objects, then manually set the initial pulse and call the optimisation. This would be more efficient when repeating runs with different starting conditions. +Please give these modules a try.
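To illustrate the compatibility import described above, here is a minimal sketch. It assumes the optional ``qutip-qtrl`` package is installed and uses the QuTiP 4-era ``optimize_pulse_unitary`` entry point purely as an example, not as the recommended interface.

```python
# Requires the optional qutip-qtrl package (pip install qutip-qtrl).
from qutip import identity, sigmax, sigmay, sigmaz
import qutip.control.pulseoptim as cpo   # old import path, provided by the shim

# Legacy-style GRAPE optimisation of a single-qubit X gate (QuTiP 4-era API).
result = cpo.optimize_pulse_unitary(
    H_d=sigmaz(),                 # drift Hamiltonian
    H_c=[sigmax(), sigmay()],     # control Hamiltonians
    U_0=identity(2),              # initial propagator
    U_targ=sigmax(),              # target gate
    num_tslots=10,
    evo_time=1.0,
    fid_err_targ=1e-6,
)
print(result.termination_reason, result.fid_err)
```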
diff --git a/doc/guide/guide-correlation.rst b/doc/guide/guide-correlation.rst index 2ca650263f..a7362e42cd 100644 --- a/doc/guide/guide-correlation.rst +++ b/doc/guide/guide-correlation.rst @@ -4,7 +4,7 @@ Two-time correlation functions ****************************** -With the QuTiP time-evolution functions (for example :func:`qutip.mesolve` and :func:`qutip.mcsolve`), a state vector or density matrix can be evolved from an initial state at :math:`t_0` to an arbitrary time :math:`t`, :math:`\rho(t)=V(t, t_0)\left\{\rho(t_0)\right\}`, where :math:`V(t, t_0)` is the propagator defined by the equation of motion. The resulting density matrix can then be used to evaluate the expectation values of arbitrary combinations of *same-time* operators. +With the QuTiP time-evolution functions (for example :func:`.mesolve` and :func:`.mcsolve`), a state vector or density matrix can be evolved from an initial state at :math:`t_0` to an arbitrary time :math:`t`, :math:`\rho(t)=V(t, t_0)\left\{\rho(t_0)\right\}`, where :math:`V(t, t_0)` is the propagator defined by the equation of motion. The resulting density matrix can then be used to evaluate the expectation values of arbitrary combinations of *same-time* operators. To calculate *two-time* correlation functions on the form :math:`\left`, we can use the quantum regression theorem (see, e.g., [Gar03]_) to write @@ -45,7 +45,7 @@ QuTiP provides a family of functions that assists in the process of calculating +----------------------------------+--------------------------------------------------+ -The most common use-case is to calculate the two time correlation function :math:`\left`. :func:`qutip.correlation_2op_1t` performs this task with sensible default values, but only allows using the :func:`mesolve` solver. From QuTiP 5.0 we added :func:`qutip.correlation_3op`. This function can also calculate correlation functions with two or three operators and with one or two times. Most importantly, this function accepts alternative solvers such as :func:`brmesolve`. +The most common use-case is to calculate the two time correlation function :math:`\left`. :func:`.correlation_2op_1t` performs this task with sensible default values, but only allows using the :func:`.mesolve` solver. From QuTiP 5.0 we added :func:`.correlation_3op`. This function can also calculate correlation functions with two or three operators and with one or two times. Most importantly, this function accepts alternative solvers such as :func:`.brmesolve`. .. _correlation-steady: @@ -55,7 +55,7 @@ Steadystate correlation function The following code demonstrates how to calculate the :math:`\left` correlation for a leaky cavity with three different relaxation rates. .. plot:: - :context: + :context: close-figs times = np.linspace(0,10.0,200) a = destroy(10) @@ -85,7 +85,7 @@ Given a correlation function :math:`\left` we can define the S(\omega) = \int_{-\infty}^{\infty} \left e^{-i\omega\tau} d\tau. -In QuTiP, we can calculate :math:`S(\omega)` using either :func:`qutip.correlation.spectrum_ss`, which first calculates the correlation function using one of the time-dependent solvers and then performs the Fourier transform semi-analytically, or we can use the function :func:`qutip.correlation.spectrum_correlation_fft` to numerically calculate the Fourier transform of a given correlation data using FFT. 
+In QuTiP, we can calculate :math:`S(\omega)` using either :func:`.spectrum`, which first calculates the correlation function using one of the time-dependent solvers and then performs the Fourier transform semi-analytically, or we can use the function :func:`.spectrum_correlation_fft` to numerically calculate the Fourier transform of a given correlation data using FFT. The following example demonstrates how these two functions can be used to obtain the emission power spectrum. @@ -99,13 +99,13 @@ The following example demonstrates how these two functions can be used to obtain Non-steadystate correlation function ==================================== -More generally, we can also calculate correlation functions of the kind :math:`\left`, i.e., the correlation function of a system that is not in its steady state. In QuTiP, we can evaluate such correlation functions using the function :func:`qutip.correlation.correlation_2op_2t`. The default behavior of this function is to return a matrix with the correlations as a function of the two time coordinates (:math:`t_1` and :math:`t_2`). +More generally, we can also calculate correlation functions of the kind :math:`\left`, i.e., the correlation function of a system that is not in its steady state. In QuTiP, we can evaluate such correlation functions using the function :func:`.correlation_2op_2t`. The default behavior of this function is to return a matrix with the correlations as a function of the two time coordinates (:math:`t_1` and :math:`t_2`). .. plot:: guide/scripts/correlation_ex2.py :width: 5.0in :include-source: -However, in some cases we might be interested in the correlation functions on the form :math:`\left`, but only as a function of time coordinate :math:`t_2`. In this case we can also use the :func:`qutip.correlation.correlation_2op_2t` function, if we pass the density matrix at time :math:`t_1` as second argument, and `None` as third argument. The :func:`qutip.correlation.correlation_2op_2t` function then returns a vector with the correlation values corresponding to the times in `taulist` (the fourth argument). +However, in some cases we might be interested in the correlation functions on the form :math:`\left`, but only as a function of time coordinate :math:`t_2`. In this case we can also use the :func:`.correlation_2op_2t` function, if we pass the density matrix at time :math:`t_1` as second argument, and `None` as third argument. The :func:`.correlation_2op_2t` function then returns a vector with the correlation values corresponding to the times in `taulist` (the fourth argument). Example: first-order optical coherence function ----------------------------------------------- @@ -116,7 +116,7 @@ This example demonstrates how to calculate a correlation function on the form :m :width: 5.0in :include-source: -For convenience, the steps for calculating the first-order coherence function have been collected in the function :func:`qutip.correlation.coherence_function_g1`. +For convenience, the steps for calculating the first-order coherence function have been collected in the function :func:`.coherence_function_g1`. 
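For concreteness, a minimal sketch of the convenience function just mentioned, for a leaky cavity with arbitrary parameters; it assumes the usual ``(g1, G1)`` return pair of normalised and unnormalised coherence functions.

```python
import numpy as np
from qutip import coherence_function_g1, coherent_dm, destroy

N = 15
taulist = np.linspace(0, 10.0, 200)
a = destroy(N)
H = 2 * np.pi * a.dag() * a          # cavity Hamiltonian
c_ops = [np.sqrt(0.25) * a]          # photon loss
rho0 = coherent_dm(N, 2.0)           # initial coherent state

# Normalised (g1) and unnormalised (G1) first-order coherence functions.
g1, G1 = coherence_function_g1(H, rho0, taulist, c_ops, a)
```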
Example: second-order optical coherence function ------------------------------------------------ @@ -129,7 +129,7 @@ The second-order optical coherence function, with time-delay :math:`\tau`, is de For a coherent state :math:`g^{(2)}(\tau) = 1`, for a thermal state :math:`g^{(2)}(\tau=0) = 2` and it decreases as a function of time (bunched photons, they tend to appear together), and for a Fock state with :math:`n` photons :math:`g^{(2)}(\tau = 0) = n(n - 1)/n^2 < 1` and it increases with time (anti-bunched photons, more likely to arrive separated in time). -To calculate this type of correlation function with QuTiP, we can use :func:`qutip.correlation.correlation_3op_1t`, which computes a correlation function on the form :math:`\left` (three operators, one delay-time vector). +To calculate this type of correlation function with QuTiP, we can use :func:`.correlation_3op_1t`, which computes a correlation function on the form :math:`\left` (three operators, one delay-time vector). We first have to combine the central two operators into one single one as they are evaluated at the same time, e.g. here we do :math:`a^\dagger(\tau)a(\tau) = (a^\dagger a)(\tau)`. The following code calculates and plots :math:`g^{(2)}(\tau)` as a function of :math:`\tau` for a coherent, thermal and Fock state. @@ -138,4 +138,4 @@ The following code calculates and plots :math:`g^{(2)}(\tau)` as a function of : :width: 5.0in :include-source: -For convenience, the steps for calculating the second-order coherence function have been collected in the function :func:`qutip.correlation.coherence_function_g2`. +For convenience, the steps for calculating the second-order coherence function have been collected in the function :func:`.coherence_function_g2`. diff --git a/doc/guide/guide-measurement.rst b/doc/guide/guide-measurement.rst index 2d74d1fab7..89149da74d 100644 --- a/doc/guide/guide-measurement.rst +++ b/doc/guide/guide-measurement.rst @@ -42,8 +42,8 @@ along the z-axis. We choose what to measure (in this case) by selecting a **measurement operator**. For example, -we could select :func:`~qutip.operators.sigmaz` which measures the z-component of the -spin of a spin-1/2 particle, or :func:`~qutip.operators.sigmax` which measures the +we could select :func:`.sigmaz` which measures the z-component of the +spin of a spin-1/2 particle, or :func:`.sigmax` which measures the x-component: .. testcode:: @@ -276,7 +276,7 @@ when called with a single observable: - `eigenstates` is an array of the eigenstates of the measurement operator, i.e. a list of the possible final states after the measurement is complete. - Each element of the array is a :obj:`~qutip.Qobj`. + Each element of the array is a :obj:`.Qobj`. - `probabilities` is a list of the probabilities of each measurement result. In our example the value is `[0.5, 0.5]` since the `up` state has equal @@ -343,7 +343,7 @@ the following result. The function :func:`~qutip.measurement.measurement_statistics` then returns two values: * `collapsed_states` is an array of the possible final states after the - measurement is complete. Each element of the array is a :obj:`~qutip.Qobj`. + measurement is complete. Each element of the array is a :obj:`.Qobj`. * `probabilities` is a list of the probabilities of each measurement outcome. diff --git a/doc/guide/guide-parfor.rst b/doc/guide/guide-parfor.rst deleted file mode 100644 index 4491cf30af..0000000000 --- a/doc/guide/guide-parfor.rst +++ /dev/null @@ -1,119 +0,0 @@ -.. 
_parfor: - -****************************************** -Parallel computation -****************************************** - -Parallel map and parallel for-loop ----------------------------------- - -Often one is interested in the output of a given function as a single-parameter is varied. -For instance, we can calculate the steady-state response of our system as the driving frequency is varied. -In cases such as this, where each iteration is independent of the others, we can speedup the calculation by performing the iterations in parallel. -In QuTiP, parallel computations may be performed using the :func:`qutip.solver.parallel.parallel_map` function. - -To use the this function we need to define a function of one or more variables, and the range over which one of these variables are to be evaluated. For example: - - -.. doctest:: - :skipif: not os_nt - :options: +NORMALIZE_WHITESPACE - - >>> result = parallel_map(func1, range(10)) - - >>> result_array = np.array(result) - - >>> print(result_array[:, 0]) # == a - [0 1 2 3 4 5 6 7 8 9] - - >>> print(result_array[:, 1]) # == b - [ 0 1 4 9 16 25 36 49 64 81] - - >>> print(result_array[:, 2]) # == c - [ 0 1 8 27 64 125 216 343 512 729] - - >>> print(result) - [(0, 0, 0), (1, 1, 1), (2, 4, 8), (3, 9, 27), (4, 16, 64)] - - -The :func:`qutip.solver.parallel.parallel_map` function is not limited to just numbers, but also works for a variety of outputs: - -.. doctest:: - :skipif: not os_nt - :options: +NORMALIZE_WHITESPACE - - >>> def func2(x): return x, Qobj(x), 'a' * x - - >>> results = parallel_map(func2, range(5)) - - >>> print([result[0] for result in results]) - [0 1 2 3 4] - - >>> print([result[1] for result in results]) - [Quantum object: dims = [[1], [1]], shape = (1, 1), type = bra - Qobj data = - [[0.]] - Quantum object: dims = [[1], [1]], shape = (1, 1), type = bra - Qobj data = - [[1.]] - Quantum object: dims = [[1], [1]], shape = (1, 1), type = bra - Qobj data = - [[2.]] - Quantum object: dims = [[1], [1]], shape = (1, 1), type = bra - Qobj data = - [[3.]] - Quantum object: dims = [[1], [1]], shape = (1, 1), type = bra - Qobj data = - [[4.]]] - - >>>print([result[2] for result in results]) - ['' 'a' 'aa' 'aaa' 'aaaa'] - - -One can also define functions with **multiple** input arguments and keyword arguments. - -.. doctest:: - :skipif: not os_nt - :options: +NORMALIZE_WHITESPACE - - >>> def sum_diff(x, y, z=0): return x + y, x - y, z - - >>> parallel_map(sum_diff, [1, 2, 3], task_args=(np.array([4, 5, 6]),), task_kwargs=dict(z=5.0)) - [(array([5, 6, 7]), array([-3, -4, -5]), 5.0), - (array([6, 7, 8]), array([-2, -3, -4]), 5.0), - (array([7, 8, 9]), array([-1, -2, -3]), 5.0)] - - -The :func:`qutip.solver.parallel.parallel_map` function supports progressbar by setting the keyword argument `progress_bar` to `True`. -The number of cpu used can also be controlled using the `map_kw` keyword, per default, all available cpus are used. - -.. doctest:: - :options: +SKIP - - >>> import time - - >>> def func(x): time.sleep(1) - - >>> result = parallel_map(func, range(50), progress_bar=True, map_kw={"num_cpus": 2}) - - 10.0%. Run time: 3.10s. Est. time left: 00:00:00:27 - 20.0%. Run time: 5.11s. Est. time left: 00:00:00:20 - 30.0%. Run time: 8.11s. Est. time left: 00:00:00:18 - 40.0%. Run time: 10.15s. Est. time left: 00:00:00:15 - 50.0%. Run time: 13.15s. Est. time left: 00:00:00:13 - 60.0%. Run time: 15.15s. Est. time left: 00:00:00:10 - 70.0%. Run time: 18.15s. Est. time left: 00:00:00:07 - 80.0%. Run time: 20.15s. Est. 
time left: 00:00:00:05 - 90.0%. Run time: 23.15s. Est. time left: 00:00:00:02 - 100.0%. Run time: 25.15s. Est. time left: 00:00:00:00 - Total run time: 28.91s - -There is a function called :func:`qutip.solver.parallel.serial_map` that works as a non-parallel drop-in replacement for :func:`qutip.solver.parallel.parallel_map`, which allows easy switching between serial and parallel computation. -Qutip also has the function :func:`qutip.solver.parallel.loky_map` as another drop-in replacement. It use the `loky` module instead of `multiprocessing` to run in parallel. -Parallel processing is useful for repeated tasks such as generating plots corresponding to the dynamical evolution of your system, or simultaneously simulating different parameter configurations. - - -IPython-based parallel_map --------------------------- - -When QuTiP is used with IPython interpreter, there is an alternative parallel for-loop implementation in the QuTiP module :func:`qutip.ipynbtools`, see :func:`qutip.ipynbtools.parallel_map`. The advantage of this parallel_map implementation is based on IPython's powerful framework for parallelization, so the compute processes are not confined to run on the same host as the main process. diff --git a/doc/guide/guide-piqs.rst b/doc/guide/guide-piqs.rst index f13cf8b3a4..3167918a85 100644 --- a/doc/guide/guide-piqs.rst +++ b/doc/guide/guide-piqs.rst @@ -32,7 +32,7 @@ where :math:`J_{\alpha,n}=\frac{1}{2}\sigma_{\alpha,n}` are SU(2) Pauli spin ope The inclusion of local processes in the dynamics lead to using a Liouvillian space of dimension :math:`4^N`. By exploiting the permutational invariance of identical particles [2-8], the Liouvillian :math:`\mathcal{D}_\text{TLS}(\rho)` can be built as a block-diagonal matrix in the basis of Dicke states :math:`|j, m \rangle`. The system under study is defined by creating an object of the -:code:`Dicke` class, e.g. simply named +:class:`~qutip.piqs.piqs.Dicke` class, e.g. simply named :code:`system`, whose first attribute is - :code:`system.N`, the number of TLSs of the system :math:`N`. @@ -48,8 +48,10 @@ The rates for collective and local processes are simply defined as Then the :code:`system.lindbladian()` creates the total TLS Lindbladian superoperator matrix. Similarly, :code:`system.hamiltonian` defines the TLS hamiltonian of the system :math:`H_\text{TLS}`. -The system's Liouvillian can be built using :code:`system.liouvillian()`. The properties of a Piqs object can be visualized by simply calling -:code:`system`. We give two basic examples on the use of *PIQS*. In the first example the incoherent emission of N driven TLSs is considered. +The system's Liouvillian can be built using :code:`system.liouvillian()`. +The properties of a Piqs object can be visualized by simply calling :code:`system`. +We give two basic examples on the use of *PIQS*. +In the first example the incoherent emission of N driven TLSs is considered. .. code-block:: python diff --git a/doc/guide/guide-random.rst b/doc/guide/guide-random.rst index 82b5920ea8..86d0abc1cd 100644 --- a/doc/guide/guide-random.rst +++ b/doc/guide/guide-random.rst @@ -11,7 +11,7 @@ Generating Random Quantum States & Operators QuTiP includes a collection of random state, unitary and channel generators for simulations, Monte Carlo evaluation, theorem evaluation, and code testing. Each of these objects can be sampled from one of several different distributions. 
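Before the ``rand_herm`` example below, here is a compact sketch of the sampling keywords discussed in this section (``density``, ``distribution``, ``eigenvalues``); the ``seed`` keyword and the ``"eigen"`` distribution name follow the QuTiP 5 conventions described here and should be treated as assumptions if your version differs.

```python
from qutip import rand_dm, rand_herm, rand_ket

H = rand_herm(5, density=0.5)        # roughly half of the elements nonzero
psi = rand_ket([2, 3], seed=42)      # ket on a composite 2x3 Hilbert space
rho = rand_dm(4, distribution="eigen", eigenvalues=[0.5, 0.3, 0.2, 0.0])
print(H.isherm, psi.dims, rho.eigenenergies().round(3))
```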
-For example, a random Hermitian operator can be sampled by calling `rand_herm` function: +For example, a random Hermitian operator can be sampled by calling :func:`.rand_herm` function: .. doctest:: [random] :hide: @@ -40,23 +40,23 @@ For example, a random Hermitian operator can be sampled by calling `rand_herm` f .. cssclass:: table-striped -+-------------------------------+--------------------------------------------+------------------------------------------+ -| Random Variable Type | Sampling Functions | Dimensions | -+===============================+============================================+==========================================+ -| State vector (``ket``) | `rand_ket`, | :math:`N \times 1` | -+-------------------------------+--------------------------------------------+------------------------------------------+ -| Hermitian operator (``oper``) | `rand_herm` | :math:`N \times N` | -+-------------------------------+--------------------------------------------+------------------------------------------+ -| Density operator (``oper``) | `rand_dm`, | :math:`N \times N` | -+-------------------------------+--------------------------------------------+------------------------------------------+ -| Unitary operator (``oper``) | `rand_unitary`, | :math:`N \times N` | -+-------------------------------+--------------------------------------------+------------------------------------------+ -| stochastic matrix (``oper``) | `rand_stochastic`, | :math:`N \times N` | -+-------------------------------+--------------------------------------------+------------------------------------------+ -| CPTP channel (``super``) | `rand_super`, `rand_super_bcsz` | :math:`(N \times N) \times (N \times N)` | -+-------------------------------+--------------------------------------------+------------------------------------------+ -| CPTP map (list of ``oper``) | `rand_kraus_map` | :math:`N \times N` (N**2 operators) | -+-------------------------------+--------------------------------------------+------------------------------------------+ ++-------------------------------+-----------------------------------------------+------------------------------------------+ +| Random Variable Type | Sampling Functions | Dimensions | ++===============================+===============================================+==========================================+ +| State vector (``ket``) | :func:`.rand_ket` | :math:`N \times 1` | ++-------------------------------+-----------------------------------------------+------------------------------------------+ +| Hermitian operator (``oper``) | :func:`.rand_herm` | :math:`N \times N` | ++-------------------------------+-----------------------------------------------+------------------------------------------+ +| Density operator (``oper``) | :func:`.rand_dm` | :math:`N \times N` | ++-------------------------------+-----------------------------------------------+------------------------------------------+ +| Unitary operator (``oper``) | :func:`.rand_unitary` | :math:`N \times N` | ++-------------------------------+-----------------------------------------------+------------------------------------------+ +| stochastic matrix (``oper``) | :func:`.rand_stochastic` | :math:`N \times N` | ++-------------------------------+-----------------------------------------------+------------------------------------------+ +| CPTP channel (``super``) | :func:`.rand_super`, :func:`.rand_super_bcsz` | :math:`(N \times N) \times (N \times N)` | 
++-------------------------------+-----------------------------------------------+------------------------------------------+ +| CPTP map (list of ``oper``)   | :func:`.rand_kraus_map`                       | :math:`N \times N` (N**2 operators)      | ++-------------------------------+-----------------------------------------------+------------------------------------------+ In all cases, these functions can be called with a single parameter :math:`dimensions` that can be the size of the relevant Hilbert space or the dimensions of a random state, unitary or channel. @@ -69,9 +69,9 @@ In all cases, these functions can be called with a single parameter :math:`dimen >>> rand_super_bcsz([[2, 3], [2, 3]]).dims [[[2, 3], [2, 3]], [[2, 3], [2, 3]]] -Several of the random `Qobj` function in QuTiP support additional parameters as well, namely *density* and *distribution*. -`rand_dm`, `rand_herm`, `rand_unitary` and `rand_ket` can be created using multiple method controlled by *distribution*. -The `rand_ket`, `rand_herm` and `rand_unitary` functions can return quantum objects such that a fraction of the elements are identically equal to zero. +Several of the random :class:`.Qobj` functions in QuTiP support additional parameters as well, namely *density* and *distribution*. +:func:`.rand_dm`, :func:`.rand_herm`, :func:`.rand_unitary` and :func:`.rand_ket` support multiple generation methods, controlled by *distribution*. +The :func:`.rand_ket`, :func:`.rand_herm` and :func:`.rand_unitary` functions can return quantum objects such that a fraction of the elements are identically equal to zero. The ratio of nonzero elements is passed as the ``density`` keyword argument. By contrast, `rand_super_bcsz` take as an argument the rank of the generated object, such that passing ``rank=1`` returns a random pure state or unitary channel, respectively. Passing ``rank=None`` specifies that the generated object should be full-rank for the given dimension. @@ -115,7 +115,7 @@ See the API documentation: :ref:`functions-rand` for details. Random objects with a given eigen spectrum ========================================== -It is also possible to generate random Hamiltonian (``rand_herm``) and densitiy matrices (``rand_dm``) with a given eigen spectrum. +It is also possible to generate random Hamiltonians (:func:`.rand_herm`) and density matrices (:func:`.rand_dm`) with a given eigen spectrum. This is done by passing an array to eigenvalues argument to either function and choosing the "eigen" distribution. For example, @@ -153,9 +153,9 @@ This technique requires many steps to build the desired quantum object, and is t Composite random objects ======================== -In many cases, one is interested in generating random quantum objects that correspond to composite systems generated using the :func:`qutip.tensor.tensor` function. +In many cases, one is interested in generating random quantum objects that correspond to composite systems generated using the :func:`.tensor` function. Specifying the tensor structure of a quantum object is done passing a list for the first argument. -The resulting quantum objects size will be the product of the elements in the list and the resulting :class:`qutip.Qobj` dimensions will be ``[dims, dims]``: +The resulting quantum objects size will be the product of the elements in the list and the resulting :class:`.Qobj` dimensions will be ``[dims, dims]``: ..
doctest:: [random] :hide: diff --git a/doc/guide/guide-saving.rst b/doc/guide/guide-saving.rst index ca78c98558..d419dfd24d 100644 --- a/doc/guide/guide-saving.rst +++ b/doc/guide/guide-saving.rst @@ -10,7 +10,7 @@ With time-consuming calculations it is often necessary to store the results to f Storing and loading QuTiP objects ================================= -To store and load arbitrary QuTiP related objects (:class:`qutip.Qobj`, :class:`qutip.solve.solver.Result`, etc.) there are two functions: :func:`qutip.fileio.qsave` and :func:`qutip.fileio.qload`. The function :func:`qutip.fileio.qsave` takes an arbitrary object as first parameter and an optional filename as second parameter (default filename is `qutip_data.qu`). The filename extension is always `.qu`. The function :func:`qutip.fileio.qload` takes a mandatory filename as first argument and loads and returns the objects in the file. +To store and load arbitrary QuTiP related objects (:class:`.Qobj`, :class:`.Result`, etc.) there are two functions: :func:`qutip.fileio.qsave` and :func:`qutip.fileio.qload`. The function :func:`qutip.fileio.qsave` takes an arbitrary object as first parameter and an optional filename as second parameter (default filename is `qutip_data.qu`). The filename extension is always `.qu`. The function :func:`qutip.fileio.qload` takes a mandatory filename as first argument and loads and returns the objects in the file. To illustrate how these functions can be used, consider a simple calculation of the steadystate of the harmonic oscillator :: @@ -18,7 +18,7 @@ To illustrate how these functions can be used, consider a simple calculation of >>> c_ops = [np.sqrt(0.5) * a, np.sqrt(0.25) * a.dag()] >>> rho_ss = steadystate(H, c_ops) -The steadystate density matrix `rho_ss` is an instance of :class:`qutip.Qobj`. It can be stored to a file `steadystate.qu` using :: +The steadystate density matrix `rho_ss` is an instance of :class:`.Qobj`. It can be stored to a file `steadystate.qu` using :: >>> qsave(rho_ss, 'steadystate') >>> !ls *.qu @@ -32,7 +32,8 @@ and it can later be loaded again, and used in further calculations :: >>> a = destroy(10) >>> np.testing.assert_almost_equal(expect(a.dag() * a, rho_ss_loaded), 0.9902248289345061) -The nice thing about the :func:`qutip.fileio.qsave` and :func:`qutip.fileio.qload` functions is that almost any object can be stored and load again later on. We can for example store a list of density matrices as returned by :func:`qutip.mesolve` :: +The nice thing about the :func:`qutip.fileio.qsave` and :func:`qutip.fileio.qload` functions is that almost any object can be stored and load again later on. +We can for example store a list of density matrices as returned by :func:`.mesolve` :: >>> a = destroy(10); H = a.dag() * a ; c_ops = [np.sqrt(0.5) * a, np.sqrt(0.25) * a.dag()] >>> psi0 = rand_ket(10) @@ -65,7 +66,7 @@ The :func:`qutip.fileio.file_data_store` takes two mandatory and three optional where `filename` is the name of the file, `data` is the data to be written to the file (must be a *numpy* array), `numtype` (optional) is a flag indicating numerical type that can take values `complex` or `real`, `numformat` (optional) specifies the numerical format that can take the values `exp` for the format `1.0e1` and `decimal` for the format `10.0`, and `sep` (optional) is an arbitrary single-character field separator (usually a tab, space, comma, semicolon, etc.). 
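A minimal round-trip sketch of these arguments (the companion reader :func:`qutip.fileio.file_data_read` and its ``sep`` argument are assumed here to mirror the store function; see the API documentation):

.. code-block:: python

    import numpy as np
    from qutip import file_data_store, file_data_read

    # A small complex array stored as semicolon-separated decimal values.
    data = np.array([[1.0 + 0.5j, 2.0 + 0.0j],
                     [3.0 + 0.0j, 4.0 - 1.0j]])
    file_data_store("example_data.dat", data, numtype="complex",
                    numformat="decimal", sep=";")

    # Read the file back with the same separator and check the round trip.
    loaded = file_data_read("example_data.dat", sep=";")
    np.testing.assert_allclose(loaded, data, atol=1e-8)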
-A common use for the :func:`qutip.fileio.file_data_store` function is to store the expectation values of a set of operators for a sequence of times, e.g., as returned by the :func:`qutip.mesolve` function, which is what the following example does +A common use for the :func:`qutip.fileio.file_data_store` function is to store the expectation values of a set of operators for a sequence of times, e.g., as returned by the :func:`.mesolve` function, which is what the following example does .. plot:: :context: diff --git a/doc/guide/guide-states.rst b/doc/guide/guide-states.rst index 3cabf6e020..4d4efc3c53 100644 --- a/doc/guide/guide-states.rst +++ b/doc/guide/guide-states.rst @@ -17,7 +17,7 @@ In the previous guide section :ref:`basics`, we saw how to create states and ope State Vectors (kets or bras) ============================== -Here we begin by creating a Fock :func:`qutip.states.basis` vacuum state vector :math:`\left|0\right>` with in a Hilbert space with 5 number states, from 0 to 4: +Here we begin by creating a Fock :func:`.basis` vacuum state vector :math:`\left|0\right>` in a Hilbert space with 5 number states, from 0 to 4: .. testcode:: [states] @@ -41,7 +41,7 @@ Here we begin by creating a Fock :func:`qutip.states.basis` vacuum state vector -and then create a lowering operator :math:`\left(\hat{a}\right)` corresponding to 5 number states using the :func:`qutip.destroy` function: +and then create a lowering operator :math:`\left(\hat{a}\right)` corresponding to 5 number states using the :func:`.destroy` function: .. testcode:: [states] @@ -102,7 +102,8 @@ We see that, as expected, the vacuum is transformed to the zero vector. A more [0.] [0.]] -The raising operator has in indeed raised the state `vec` from the vacuum to the :math:`\left| 1\right>` state. Instead of using the dagger ``Qobj.dag()`` method to raise the state, we could have also used the built in :func:`qutip.create` function to make a raising operator: +The raising operator has indeed raised the state `vec` from the vacuum to the :math:`\left| 1\right>` state. +Instead of using the dagger ``Qobj.dag()`` method to raise the state, we could have also used the built in :func:`.create` function to make a raising operator: .. testcode:: [states] @@ -237,7 +238,9 @@ Notice how in this last example, application of the number operator does not giv [0.] [0.]] -Since we are giving a demonstration of using states and operators, we have done a lot more work than we should have. For example, we do not need to operate on the vacuum state to generate a higher number Fock state. Instead we can use the :func:`qutip.states.basis` (or :func:`qutip.states.fock`) function to directly obtain the required state: +Since we are giving a demonstration of using states and operators, we have done a lot more work than we should have. +For example, we do not need to operate on the vacuum state to generate a higher number Fock state. +Instead we can use the :func:`.basis` (or :func:`.fock`) function to directly obtain the required state: .. testcode:: [states] @@ -258,7 +261,7 @@ Since we are giving a demonstration of using states and operators, we have done [0.] [0.]] -Notice how it is automatically normalized. We can also use the built in :func:`qutip.num` operator: +Notice how it is automatically normalized. We can also use the built in :func:`.num` operator: .. testcode:: [states] @@ -319,7 +322,7 @@ We can also create superpositions of states: [0. ] [0.
]] -where we have used the :func:`qutip.Qobj.unit` method to again normalize the state. Operating with the number function again: +where we have used the :meth:`.Qobj.unit` method to again normalize the state. Operating with the number function again: .. testcode:: [states] @@ -338,7 +341,7 @@ where we have used the :func:`qutip.Qobj.unit` method to again normalize the sta [0. ] [0. ]] -We can also create coherent states and squeezed states by applying the :func:`qutip.displace` and :func:`qutip.squeeze` functions to the vacuum state: +We can also create coherent states and squeezed states by applying the :func:`.displace` and :func:`.squeeze` functions to the vacuum state: .. testcode:: [states] @@ -380,7 +383,7 @@ We can also create coherent states and squeezed states by applying the :func:`qu [-0.02688063-0.23828775j] [ 0.26352814+0.11512178j]] -Of course, displacing the vacuum gives a coherent state, which can also be generated using the built in :func:`qutip.states.coherent` function. +Of course, displacing the vacuum gives a coherent state, which can also be generated using the built in :func:`.coherent` function. .. _states-dm: @@ -411,7 +414,7 @@ The simplest density matrix is created by forming the outer-product :math:`\left [0. 0. 0. 0. 0.] [0. 0. 0. 0. 0.]] -A similar task can also be accomplished via the :func:`qutip.states.fock_dm` or :func:`qutip.states.ket2dm` functions: +A similar task can also be accomplished via the :func:`.fock_dm` or :func:`.ket2dm` functions: .. testcode:: [states] @@ -466,7 +469,8 @@ If we want to create a density matrix with equal classical probability of being [0. 0. 0. 0. 0. ] [0. 0. 0. 0. 0.5]] -or use ``0.5 * fock_dm(5, 2) + 0.5 * fock_dm(5, 4)``. There are also several other built-in functions for creating predefined density matrices, for example :func:`qutip.states.coherent_dm` and :func:`qutip.states.thermal_dm` which create coherent state and thermal state density matrices, respectively. +or use ``0.5 * fock_dm(5, 2) + 0.5 * fock_dm(5, 4)``. +There are also several other built-in functions for creating predefined density matrices, for example :func:`.coherent_dm` and :func:`.thermal_dm` which create coherent state and thermal state density matrices, respectively. .. testcode:: [states] @@ -503,7 +507,8 @@ or use ``0.5 * fock_dm(5, 2) + 0.5 * fock_dm(5, 4)``. There are also several oth [0. 0. 0. 0.08046635 0. ] [0. 0. 0. 0. 0.04470353]] -QuTiP also provides a set of distance metrics for determining how close two density matrix distributions are to each other. Included are the trace distance :func:`qutip.core.metrics.tracedist`, fidelity :func:`qutip.core.metrics.fidelity`, Hilbert-Schmidt distance :func:`qutip.core.metrics.hilbert_dist`, Bures distance :func:`qutip.core.metrics.bures_dist`, Bures angle :func:`qutip.core.metrics.bures_angle`, and quantum Hellinger distance :func:`qutip.core.metrics.hellinger_dist`. +QuTiP also provides a set of distance metrics for determining how close two density matrix distributions are to each other. +Included are the trace distance :func:`.tracedist`, fidelity :func:`.fidelity`, Hilbert-Schmidt distance :func:`.hilbert_dist`, Bures distance :func:`.bures_dist`, Bures angle :func:`.bures_angle`, and quantum Hellinger distance :func:`.hellinger_dist`. .. 
testcode:: [states] @@ -534,7 +539,7 @@ For a pure state and a mixed state, :math:`1 - F^{2} \le T` which can also be ve Qubit (two-level) systems ========================= -Having spent a fair amount of time on basis states that represent harmonic oscillator states, we now move on to qubit, or two-level quantum systems (for example a spin-1/2). To create a state vector corresponding to a qubit system, we use the same :func:`qutip.states.basis`, or :func:`qutip.states.fock`, function with only two levels: +Having spent a fair amount of time on basis states that represent harmonic oscillator states, we now move on to qubit, or two-level quantum systems (for example a spin-1/2). To create a state vector corresponding to a qubit system, we use the same :func:`.basis`, or :func:`.fock`, function with only two levels: .. testcode:: [states] @@ -547,7 +552,7 @@ Now at this point one may ask how this state is different than that of a harmoni vac = basis(2, 0) -At this stage, there is no difference. This should not be surprising as we called the exact same function twice. The difference between the two comes from the action of the spin operators :func:`qutip.sigmax`, :func:`qutip.sigmay`, :func:`qutip.sigmaz`, :func:`qutip.sigmap`, and :func:`qutip.sigmam` on these two-level states. For example, if ``vac`` corresponds to the vacuum state of a harmonic oscillator, then, as we have already seen, we can use the raising operator to get the :math:`\left|1\right>` state: +At this stage, there is no difference. This should not be surprising as we called the exact same function twice. The difference between the two comes from the action of the spin operators :func:`.sigmax`, :func:`.sigmay`, :func:`.sigmaz`, :func:`.sigmap`, and :func:`.sigmam` on these two-level states. For example, if ``vac`` corresponds to the vacuum state of a harmonic oscillator, then, as we have already seen, we can use the raising operator to get the :math:`\left|1\right>` state: .. testcode:: [states] @@ -579,7 +584,7 @@ At this stage, there is no difference. This should not be surprising as we call [[0.] [1.]] -For a spin system, the operator analogous to the raising operator is the sigma-plus operator :func:`qutip.sigmap`. Operating on the ``spin`` state gives: +For a spin system, the operator analogous to the raising operator is the sigma-plus operator :func:`.sigmap`. Operating on the ``spin`` state gives: .. testcode:: [states] @@ -609,7 +614,7 @@ For a spin system, the operator analogous to the raising operator is the sigma-p [[0.] [0.]] -Now we see the difference! The :func:`qutip.sigmap` operator acting on the ``spin`` state returns the zero vector. Why is this? To see what happened, let us use the :func:`qutip.sigmaz` operator: +Now we see the difference! The :func:`.sigmap` operator acting on the ``spin`` state returns the zero vector. Why is this? To see what happened, let us use the :func:`.sigmaz` operator: .. testcode:: [states] @@ -669,7 +674,7 @@ Now we see the difference! The :func:`qutip.sigmap` operator acting on the ``sp [[ 0.] [-1.]] -The answer is now apparent. Since the QuTiP :func:`qutip.sigmaz` function uses the standard z-basis representation of the sigma-z spin operator, the ``spin`` state corresponds to the :math:`\left|\uparrow\right>` state of a two-level spin system while ``spin2`` gives the :math:`\left|\downarrow\right>` state. Therefore, in our previous example ``sigmap() * spin``, we raised the qubit state out of the truncated two-level Hilbert space resulting in the zero state. 
+The answer is now apparent. Since the QuTiP :func:`.sigmaz` function uses the standard z-basis representation of the sigma-z spin operator, the ``spin`` state corresponds to the :math:`\left|\uparrow\right>` state of a two-level spin system while ``spin2`` gives the :math:`\left|\downarrow\right>` state. Therefore, in our previous example ``sigmap() * spin``, we raised the qubit state out of the truncated two-level Hilbert space resulting in the zero state. While at first glance this convention might seem somewhat odd, it is in fact quite handy. For one, the spin operators remain in the conventional form. Second, when the spin system is in the :math:`\left|\uparrow\right>` state: @@ -689,14 +694,14 @@ While at first glance this convention might seem somewhat odd, it is in fact qui the non-zero component is the zeroth-element of the underlying matrix (remember that python uses c-indexing, and matrices start with the zeroth element). The :math:`\left|\downarrow\right>` state therefore has a non-zero entry in the first index position. This corresponds nicely with the quantum information definitions of qubit states, where the excited :math:`\left|\uparrow\right>` state is label as :math:`\left|0\right>`, and the :math:`\left|\downarrow\right>` state by :math:`\left|1\right>`. -If one wants to create spin operators for higher spin systems, then the :func:`qutip.jmat` function comes in handy. +If one wants to create spin operators for higher spin systems, then the :func:`.jmat` function comes in handy. .. _states-expect: Expectation values =================== -Some of the most important information about quantum systems comes from calculating the expectation value of operators, both Hermitian and non-Hermitian, as the state or density matrix of the system varies in time. Therefore, in this section we demonstrate the use of the :func:`qutip.expect` function. To begin: +Some of the most important information about quantum systems comes from calculating the expectation value of operators, both Hermitian and non-Hermitian, as the state or density matrix of the system varies in time. Therefore, in this section we demonstrate the use of the :func:`.expect` function. To begin: .. testcode:: [states] @@ -721,7 +726,7 @@ Some of the most important information about quantum systems comes from calculat np.testing.assert_almost_equal(expect(c, cat), 0.9999999999999998j) -The :func:`qutip.expect` function also accepts lists or arrays of state vectors or density matrices for the second input: +The :func:`.expect` function also accepts lists or arrays of state vectors or density matrices for the second input: .. testcode:: [states] @@ -749,9 +754,9 @@ The :func:`qutip.expect` function also accepts lists or arrays of state vectors [ 0.+0.j 0.+1.j -1.+0.j 0.-1.j] -Notice how in this last example, all of the return values are complex numbers. This is because the :func:`qutip.expect` function looks to see whether the operator is Hermitian or not. If the operator is Hermitian, then the output will always be real. In the case of non-Hermitian operators, the return values may be complex. Therefore, the :func:`qutip.expect` function will return an array of complex values for non-Hermitian operators when the input is a list/array of states or density matrices. +Notice how in this last example, all of the return values are complex numbers. This is because the :func:`.expect` function looks to see whether the operator is Hermitian or not. If the operator is Hermitian, then the output will always be real. 
In the case of non-Hermitian operators, the return values may be complex. Therefore, the :func:`.expect` function will return an array of complex values for non-Hermitian operators when the input is a list/array of states or density matrices. -Of course, the :func:`qutip.expect` function works for spin states and operators: +Of course, the :func:`.expect` function works for spin states and operators: .. testcode:: [states] @@ -797,8 +802,8 @@ in two copies of that Hilbert space, [Hav03]_, [Wat13]_. This isomorphism is implemented in QuTiP by the -:obj:`~qutip.superoperator.operator_to_vector` and -:obj:`~qutip.superoperator.vector_to_operator` functions: +:obj:`.operator_to_vector` and +:obj:`.vector_to_operator` functions: .. testcode:: [states] @@ -842,7 +847,7 @@ This isomorphism is implemented in QuTiP by the np.testing.assert_almost_equal((rho - rho2).norm(), 0) -The :attr:`~qutip.Qobj.type` attribute indicates whether a quantum object is +The :attr:`.Qobj.type` attribute indicates whether a quantum object is a vector corresponding to an operator (``operator-ket``), or its Hermitian conjugate (``operator-bra``). @@ -883,7 +888,7 @@ between :math:`\mathcal{L}(\mathcal{H})` and :math:`\mathcal{H} \otimes \mathcal Since :math:`\mathcal{H} \otimes \mathcal{H}` is a vector space, linear maps on this space can be represented as matrices, often called *superoperators*. -Using the :obj:`~qutip.Qobj`, the :obj:`~qutip.superoperator.spre` and :obj:`~qutip.superoperator.spost` functions, supermatrices +Using the :obj:`.Qobj`, the :obj:`.spre` and :obj:`.spost` functions, supermatrices corresponding to left- and right-multiplication respectively can be quickly constructed. @@ -893,7 +898,7 @@ constructed. S = spre(X) * spost(X.dag()) # Represents conjugation by X. -Note that this is done automatically by the :obj:`~qutip.superop_reps.to_super` function when given +Note that this is done automatically by the :obj:`.to_super` function when given ``type='oper'`` input. .. testcode:: [states] @@ -921,8 +926,8 @@ Quantum objects representing superoperators are denoted by ``type='super'``: [1. 0. 0. 0.]] Information about superoperators, such as whether they represent completely -positive maps, is exposed through the :attr:`~qutip.Qobj.iscp`, :attr:`~qutip.Qobj.istp` -and :attr:`~qutip.Qobj.iscptp` attributes: +positive maps, is exposed through the :attr:`.Qobj.iscp`, :attr:`.Qobj.istp` +and :attr:`.Qobj.iscptp` attributes: .. testcode:: [states] @@ -936,7 +941,7 @@ and :attr:`~qutip.Qobj.iscptp` attributes: True True True In addition, dynamical generators on this extended space, often called -*Liouvillian superoperators*, can be created using the :func:`~qutip.superoperator.liouvillian` function. Each of these takes a Hamiltonian along with +*Liouvillian superoperators*, can be created using the :func:`.liouvillian` function. Each of these takes a Hamiltonian along with a list of collapse operators, and returns a ``type="super"`` object that can be exponentiated to find the superoperator for that evolution. @@ -1001,8 +1006,8 @@ convention, J(\Lambda) = (\mathbb{1} \otimes \Lambda) [|\mathbb{1}\rangle\!\rangle \langle\!\langle \mathbb{1}|]. -In QuTiP, :math:`J(\Lambda)` can be found by calling the :func:`~qutip.superop_reps.to_choi` -function on a ``type="super"`` :obj:`~qutip.Qobj`. +In QuTiP, :math:`J(\Lambda)` can be found by calling the :func:`.to_choi` +function on a ``type="super"`` :obj:`.Qobj`. .. testcode:: [states] @@ -1042,7 +1047,7 @@ function on a ``type="super"`` :obj:`~qutip.Qobj`. [0. 
0. 0. 0.] [1. 0. 0. 1.]] -If a :obj:`~qutip.Qobj` instance is already in the Choi :attr:`~qutip.Qobj.superrep`, then calling :func:`~qutip.superop_reps.to_choi` +If a :obj:`.Qobj` instance is already in the Choi :attr:`.Qobj.superrep`, then calling :func:`.to_choi` does nothing: .. testcode:: [states] @@ -1061,8 +1066,8 @@ does nothing: [0. 1. 1. 0.] [0. 0. 0. 0.]] -To get back to the superoperator representation, simply use the :func:`~qutip.superop_reps.to_super` function. -As with :func:`~qutip.superop_reps.to_choi`, :func:`~qutip.superop_reps.to_super` is idempotent: +To get back to the superoperator representation, simply use the :func:`.to_super` function. +As with :func:`.to_choi`, :func:`.to_super` is idempotent: .. testcode:: [states] @@ -1116,7 +1121,7 @@ we have that = \sum_i |A_i\rangle\!\rangle \langle\!\langle A_i| = J(\Lambda). The Kraus representation of a hermicity-preserving map can be found in QuTiP -using the :func:`~qutip.superop_reps.to_kraus` function. +using the :func:`.to_kraus` function. .. testcode:: [states] @@ -1219,9 +1224,9 @@ using the :func:`~qutip.superop_reps.to_kraus` function. [[0. 0. ] [0. 0.70710678]]] -As with the other representation conversion functions, :func:`~qutip.superop_reps.to_kraus` -checks the :attr:`~qutip.Qobj.superrep` attribute of its input, and chooses an appropriate -conversion method. Thus, in the above example, we can also call :func:`~qutip.superop_reps.to_kraus` +As with the other representation conversion functions, :func:`.to_kraus` +checks the :attr:`.Qobj.superrep` attribute of its input, and chooses an appropriate +conversion method. Thus, in the above example, we can also call :func:`.to_kraus` on ``J``. .. testcode:: [states] @@ -1285,7 +1290,7 @@ all operators :math:`X` acting on :math:`\mathcal{H}`, where the partial trace is over a new index that corresponds to the index in the Kraus summation. Conversion to Stinespring -is handled by the :func:`~qutip.superop_reps.to_stinespring` +is handled by the :func:`.to_stinespring` function. .. testcode:: [states] @@ -1373,7 +1378,7 @@ the :math:`\chi`-matrix representation, where :math:`\{B_\alpha\}` is a basis for the space of matrices acting on :math:`\mathcal{H}`. In QuTiP, this basis is taken to be the Pauli basis :math:`B_\alpha = \sigma_\alpha / \sqrt{2}`. Conversion to the -:math:`\chi` formalism is handled by the :func:`~qutip.superop_reps.to_chi` +:math:`\chi` formalism is handled by the :func:`.to_chi` function. .. testcode:: [states] @@ -1414,9 +1419,9 @@ the :math:`\chi_{00}` element: Here, the factor of 4 comes from the dimension of the underlying Hilbert space :math:`\mathcal{H}`. As with the superoperator and Choi representations, the :math:`\chi` representation is -denoted by the :attr:`~qutip.Qobj.superrep`, such that :func:`~qutip.superop_reps.to_super`, -:func:`~qutip.superop_reps.to_choi`, :func:`~qutip.superop_reps.to_kraus`, -:func:`~qutip.superop_reps.to_stinespring` and :func:`~qutip.superop_reps.to_chi` +denoted by the :attr:`.Qobj.superrep`, such that :func:`.to_super`, +:func:`.to_choi`, :func:`.to_kraus`, +:func:`.to_stinespring` and :func:`.to_chi` all convert from the :math:`\chi` representation appropriately. Properties of Quantum Maps @@ -1425,7 +1430,7 @@ Properties of Quantum Maps In addition to converting between the different representations of quantum maps, QuTiP also provides attributes to make it easy to check if a map is completely positive, trace preserving and/or hermicity preserving. 
Each of these attributes -uses :attr:`~qutip.Qobj.superrep` to automatically perform any needed conversions. +uses :attr:`.Qobj.superrep` to automatically perform any needed conversions. In particular, a quantum map is said to be positive (but not necessarily completely positive) if it maps all positive operators to positive operators. For instance, the @@ -1451,7 +1456,7 @@ with negative eigenvalues. Complete positivity addresses this by requiring that a map returns positive operators for all positive operators, and does so even under tensoring with another map. The Choi matrix is very useful here, as it can be shown that a map is completely positive if and only if its Choi matrix -is positive [Wat13]_. QuTiP implements this check with the :attr:`~qutip.Qobj.iscp` +is positive [Wat13]_. QuTiP implements this check with the :attr:`.Qobj.iscp` attribute. As an example, notice that the snippet above already calculates the Choi matrix of the transpose map by acting it on half of an entangled pair. We simply need to manually set the ``dims`` and ``superrep`` attributes to reflect the @@ -1477,7 +1482,7 @@ That is, :math:`\Lambda(\rho) = (\Lambda(\rho))^\dagger` for all :math:`\rho` su :math:`\rho = \rho^\dagger`. To see this, we note that :math:`(\rho^{\mathrm{T}})^\dagger = \rho^*`, the complex conjugate of :math:`\rho`. By assumption, :math:`\rho = \rho^\dagger = (\rho^*)^{\mathrm{T}}`, though, such that :math:`\Lambda(\rho) = \Lambda(\rho^\dagger) = \rho^*`. -We can confirm this by checking the :attr:`~qutip.Qobj.ishp` attribute: +We can confirm this by checking the :attr:`.Qobj.ishp` attribute: .. testcode:: [states] @@ -1492,7 +1497,7 @@ We can confirm this by checking the :attr:`~qutip.Qobj.ishp` attribute: Next, we note that the transpose map does preserve the trace of its inputs, such that :math:`\operatorname{Tr}(\Lambda[\rho]) = \operatorname{Tr}(\rho)` for all :math:`\rho`. -This can be confirmed by the :attr:`~qutip.Qobj.istp` attribute: +This can be confirmed by the :attr:`.Qobj.istp` attribute: .. testcode:: [states] diff --git a/doc/guide/guide-steady.rst b/doc/guide/guide-steady.rst index 08e7846e84..41fb1af809 100644 --- a/doc/guide/guide-steady.rst +++ b/doc/guide/guide-steady.rst @@ -19,7 +19,7 @@ Although the requirement for time-independence seems quite resitrictive, one can Steady State solvers in QuTiP ============================= -In QuTiP, the steady-state solution for a system Hamiltonian or Liouvillian is given by :func:`qutip.steadystate.steadystate`. This function implements a number of different methods for finding the steady state, each with their own pros and cons, where the method used can be chosen using the ``method`` keyword argument. +In QuTiP, the steady-state solution for a system Hamiltonian or Liouvillian is given by :func:`.steadystate`. This function implements a number of different methods for finding the steady state, each with their own pros and cons, where the method used can be chosen using the ``method`` keyword argument. .. cssclass:: table-striped @@ -44,7 +44,7 @@ In QuTiP, the steady-state solution for a system Hamiltonian or Liouvillian is g - Steady-state solution via the **dense** SVD of the Liouvillian. -The function :func:`qutip.steadystate` can take either a Hamiltonian and a list +The function :func:`.steadystate` can take either a Hamiltonian and a list of collapse operators as input, generating internally the corresponding Liouvillian super operator in Lindblad form, or alternatively, a Liouvillian passed by the user. 
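As a brief sketch of the two calling conventions (the damped-cavity operators below are placeholders chosen only for illustration):

.. code-block:: python

    import numpy as np
    from qutip import destroy, liouvillian, steadystate

    # A leaky cavity mode with weak incoherent pumping.
    a = destroy(10)
    H = a.dag() * a
    c_ops = [np.sqrt(0.5) * a, np.sqrt(0.1) * a.dag()]

    # Pass the Hamiltonian and collapse operators directly ...
    rho_ss = steadystate(H, c_ops)

    # ... or build the Liouvillian first and pass that instead.
    L = liouvillian(H, c_ops)
    rho_ss_from_L = steadystate(L)

    # Both calls should return the same steady-state density matrix.
    assert (rho_ss - rho_ss_from_L).norm() < 1e-8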
@@ -89,7 +89,7 @@ Kernel library that comes with the Anacoda (2.5+) and Intel Python distributions. This gives a substantial increase in performance compared with the standard SuperLU method used by SciPy. To verify that QuTiP can find the necessary libraries, one can check for ``INTEL MKL Ext: True`` in the QuTiP -about box (:func:`qutip.about`). +about box (:func:`.about`). .. _steady-usage: @@ -98,7 +98,7 @@ Using the Steadystate Solver ============================= Solving for the steady state solution to the Lindblad master equation for a -general system with :func:`qutip.steadystate` can be accomplished +general system with :func:`.steadystate` can be accomplished using:: >>> rho_ss = steadystate(H, c_ops) @@ -122,7 +122,7 @@ method, and ``solver="spsolve"`` indicate to use the sparse solver. Sparse solvers may still use quite a large amount of memory when they factorize the matrix since the Liouvillian usually has a large bandwidth. -To address this, :func:`qutip.steadystate` allows one to use the bandwidth minimization algorithms +To address this, :func:`.steadystate` allows one to use the bandwidth minimization algorithms listed in :ref:`steady-args`. For example: .. code-block:: python @@ -211,7 +211,7 @@ The following additional solver arguments are available for the steady-state sol See the corresponding documentation from scipy for a full list. -Further information can be found in the :func:`qutip.steadystate` docstrings. +Further information can be found in the :func:`.steadystate` docstrings. .. _steady-example: diff --git a/doc/guide/guide-super.rst b/doc/guide/guide-super.rst index 76ec70cb19..887e60620a 100644 --- a/doc/guide/guide-super.rst +++ b/doc/guide/guide-super.rst @@ -6,7 +6,7 @@ Superoperators, Pauli Basis and Channel Contraction written by `Christopher Granade `, Institute for Quantum Computing -In this guide, we will demonstrate the :func:`tensor_contract` function, which contracts one or more pairs of indices of a Qobj. This functionality can be used to find rectangular superoperators that implement the partial trace channel :math:S(\rho) = \Tr_2(\rho)`, for instance. Using this functionality, we can quickly turn a system-environment representation of an open quantum process into a superoperator representation. +In this guide, we will demonstrate the :func:`.tensor_contract` function, which contracts one or more pairs of indices of a Qobj. This functionality can be used to find rectangular superoperators that implement the partial trace channel :math:S(\rho) = \Tr_2(\rho)`, for instance. Using this functionality, we can quickly turn a system-environment representation of an open quantum process into a superoperator representation. .. _super-representation-plotting: @@ -17,7 +17,7 @@ Superoperator Representations and Plotting We start off by first demonstrating plotting of superoperators, as this will be useful to us in visualizing the results of a contracted channel. -In particular, we will use Hinton diagrams as implemented by :func:`qutip.visualization.hinton`, which +In particular, we will use Hinton diagrams as implemented by :func:`~qutip.visualization.hinton`, which show the real parts of matrix elements as squares whose size and color both correspond to the magnitude of each element. To illustrate, we first plot a few density operators. .. 
plot:: @@ -35,7 +35,7 @@ We show superoperators as matrices in the *Pauli basis*, such that any Hermicity As an example, conjugation by :math:`\sigma_z` leaves :math:`\mathbb{1}` and :math:`\sigma_z` invariant, but flips the sign of :math:`\sigma_x` and :math:`\sigma_y`. This is indicated in Hinton diagrams by a negative-valued square for the sign change and a positive-valued square for a +1 sign. .. plot:: - :context: + :context: close-figs hinton(to_super(sigmaz())) @@ -43,7 +43,7 @@ As an example, conjugation by :math:`\sigma_z` leaves :math:`\mathbb{1}` and :ma As a couple more examples, we also consider the supermatrix for a Hadamard transform and for :math:`\sigma_z \otimes H`. .. plot:: - :context: + :context: close-figs hinton(to_super(hadamard_transform())) hinton(to_super(tensor(sigmaz(), hadamard_transform()))) @@ -66,7 +66,8 @@ We can think of the :math:`\scriptstyle \rm CNOT` here as a system-environment r :width: 2.5in -The two tensor wires on the left indicate where we must take a tensor contraction to obtain the measurement map. Numbering the tensor wires from 0 to 3, this corresponds to a :func:`tensor_contract` argument of ``(1, 3)``. +The two tensor wires on the left indicate where we must take a tensor contraction to obtain the measurement map. +Numbering the tensor wires from 0 to 3, this corresponds to a :func:`.tensor_contract` argument of ``(1, 3)``. .. plot:: :context: @@ -74,7 +75,7 @@ The two tensor wires on the left indicate where we must take a tensor contractio tensor_contract(to_super(identity([2, 2])), (1, 3)) -Meanwhile, the :func:`super_tensor` function implements the swap on the right, such that we can quickly find the preparation map. +Meanwhile, the :func:`.super_tensor` function implements the swap on the right, such that we can quickly find the preparation map. .. plot:: :context: @@ -86,14 +87,14 @@ Meanwhile, the :func:`super_tensor` function implements the swap on the right, s For a :math:`\scriptstyle \rm CNOT` system-environment model, the composition of these maps should give us a completely dephasing channel. The channel on both qubits is just the superunitary :math:`\scriptstyle \rm CNOT` channel: .. plot:: - :context: + :context: close-figs hinton(to_super(cnot())) We now complete by multiplying the superunitary :math:`\scriptstyle \rm CNOT` by the preparation channel above, then applying the partial trace channel by contracting the second and fourth index indices. As expected, this gives us a dephasing map. .. plot:: - :context: + :context: close-figs hinton(tensor_contract(to_super(cnot()), (1, 3)) * s_prep) diff --git a/doc/guide/guide-tensor.rst b/doc/guide/guide-tensor.rst index b870ba2672..dab91bcfa9 100644 --- a/doc/guide/guide-tensor.rst +++ b/doc/guide/guide-tensor.rst @@ -12,7 +12,7 @@ Tensor products To describe the states of multipartite quantum systems - such as two coupled qubits, a qubit coupled to an oscillator, etc. - we need to expand the Hilbert space by taking the tensor product of the state vectors for each of the system components. Similarly, the operators acting on the state vectors in the combined Hilbert space (describing the coupled system) are formed by taking the tensor product of the individual operators. -In QuTiP the function :func:`qutip.core.tensor.tensor` is used to accomplish this task. This function takes as argument a collection:: +In QuTiP the function :func:`~qutip.core.tensor.tensor` is used to accomplish this task. 
This function takes as argument a collection:: >>> tensor(op1, op2, op3) # doctest: +SKIP @@ -58,7 +58,7 @@ or equivalently using the ``list`` format: [0.] [0.]] -This is straightforward to generalize to more qubits by adding more component state vectors in the argument list to the :func:`qutip.core.tensor.tensor` function, as illustrated in the following example: +This is straightforward to generalize to more qubits by adding more component state vectors in the argument list to the :func:`~qutip.core.tensor.tensor` function, as illustrated in the following example: .. testcode:: [tensor] @@ -83,7 +83,7 @@ This is straightforward to generalize to more qubits by adding more component st This state is slightly more complicated, describing two qubits in a superposition between the up and down states, while the third qubit is in its ground state. -To construct operators that act on an extended Hilbert space of a combined system, we similarly pass a list of operators for each component system to the :func:`qutip.core.tensor.tensor` function. For example, to form the operator that represents the simultaneous action of the :math:`\sigma_x` operator on two qubits: +To construct operators that act on an extended Hilbert space of a combined system, we similarly pass a list of operators for each component system to the :func:`~qutip.core.tensor.tensor` function. For example, to form the operator that represents the simultaneous action of the :math:`\sigma_x` operator on two qubits: .. testcode:: [tensor] @@ -125,7 +125,7 @@ To create operators in a combined Hilbert space that only act on a single compon Example: Constructing composite Hamiltonians ============================================ -The :func:`qutip.core.tensor.tensor` function is extensively used when constructing Hamiltonians for composite systems. Here we'll look at some simple examples. +The :func:`~qutip.core.tensor.tensor` function is extensively used when constructing Hamiltonians for composite systems. Here we'll look at some simple examples. .. _tensor-product-example-2qubits: @@ -189,15 +189,16 @@ A two-level system coupled to a cavity: The Jaynes-Cummings model The simplest possible quantum mechanical description for light-matter interaction is encapsulated in the Jaynes-Cummings model, which describes the coupling between a two-level atom and a single-mode electromagnetic field (a cavity mode). Denoting the energy splitting of the atom and cavity ``omega_a`` and ``omega_c``, respectively, and the atom-cavity interaction strength ``g``, the Jaynes-Cummings Hamiltonian can be constructed as: -.. testcode:: [tensor] +.. plot:: + :context: reset - N = 10 + N = 6 omega_a = 1.0 omega_c = 1.25 - g = 0.05 + g = 0.75 a = tensor(identity(2), destroy(N)) @@ -207,95 +208,7 @@ The simplest possible quantum mechanical description for light-matter interactio H = 0.5 * omega_a * sz + omega_c * a.dag() * a + g * (a.dag() * sm + a * sm.dag()) - print(H) - -**Output**: - -.. testoutput:: [tensor] - :options: +NORMALIZE_WHITESPACE - - Quantum object: dims = [[2, 10], [2, 10]], shape = (20, 20), type = oper, isherm = True - Qobj data = - [[ 0.5 0. 0. 0. 0. 0. - 0. 0. 0. 0. 0. 0. - 0. 0. 0. 0. 0. 0. - 0. 0. ] - [ 0. 1.75 0. 0. 0. 0. - 0. 0. 0. 0. 0.05 0. - 0. 0. 0. 0. 0. 0. - 0. 0. ] - [ 0. 0. 3. 0. 0. 0. - 0. 0. 0. 0. 0. 0.07071068 - 0. 0. 0. 0. 0. 0. - 0. 0. ] - [ 0. 0. 0. 4.25 0. 0. - 0. 0. 0. 0. 0. 0. - 0.08660254 0. 0. 0. 0. 0. - 0. 0. ] - [ 0. 0. 0. 0. 5.5 0. - 0. 0. 0. 0. 0. 0. - 0. 0.1 0. 0. 0. 0. - 0. 0. ] - [ 0. 0. 0. 0. 0. 6.75 - 0. 0. 0. 
0. 0. 0. - 0. 0. 0.1118034 0. 0. 0. - 0. 0. ] - [ 0. 0. 0. 0. 0. 0. - 8. 0. 0. 0. 0. 0. - 0. 0. 0. 0.12247449 0. 0. - 0. 0. ] - [ 0. 0. 0. 0. 0. 0. - 0. 9.25 0. 0. 0. 0. - 0. 0. 0. 0. 0.13228757 0. - 0. 0. ] - [ 0. 0. 0. 0. 0. 0. - 0. 0. 10.5 0. 0. 0. - 0. 0. 0. 0. 0. 0.14142136 - 0. 0. ] - [ 0. 0. 0. 0. 0. 0. - 0. 0. 0. 11.75 0. 0. - 0. 0. 0. 0. 0. 0. - 0.15 0. ] - [ 0. 0.05 0. 0. 0. 0. - 0. 0. 0. 0. -0.5 0. - 0. 0. 0. 0. 0. 0. - 0. 0. ] - [ 0. 0. 0.07071068 0. 0. 0. - 0. 0. 0. 0. 0. 0.75 - 0. 0. 0. 0. 0. 0. - 0. 0. ] - [ 0. 0. 0. 0.08660254 0. 0. - 0. 0. 0. 0. 0. 0. - 2. 0. 0. 0. 0. 0. - 0. 0. ] - [ 0. 0. 0. 0. 0.1 0. - 0. 0. 0. 0. 0. 0. - 0. 3.25 0. 0. 0. 0. - 0. 0. ] - [ 0. 0. 0. 0. 0. 0.1118034 - 0. 0. 0. 0. 0. 0. - 0. 0. 4.5 0. 0. 0. - 0. 0. ] - [ 0. 0. 0. 0. 0. 0. - 0.12247449 0. 0. 0. 0. 0. - 0. 0. 0. 5.75 0. 0. - 0. 0. ] - [ 0. 0. 0. 0. 0. 0. - 0. 0.13228757 0. 0. 0. 0. - 0. 0. 0. 0. 7. 0. - 0. 0. ] - [ 0. 0. 0. 0. 0. 0. - 0. 0. 0.14142136 0. 0. 0. - 0. 0. 0. 0. 0. 8.25 - 0. 0. ] - [ 0. 0. 0. 0. 0. 0. - 0. 0. 0. 0.15 0. 0. - 0. 0. 0. 0. 0. 0. - 9.5 0. ] - [ 0. 0. 0. 0. 0. 0. - 0. 0. 0. 0. 0. 0. - 0. 0. 0. 0. 0. 0. - 0. 10.75 ]] + hinton(H, fig=plt.figure(figsize=(12, 12))) Here ``N`` is the number of Fock states included in the cavity mode. @@ -305,7 +218,12 @@ Here ``N`` is the number of Fock states included in the cavity mode. Partial trace ============= -The partial trace is an operation that reduces the dimension of a Hilbert space by eliminating some degrees of freedom by averaging (tracing). In this sense it is therefore the converse of the tensor product. It is useful when one is interested in only a part of a coupled quantum system. For open quantum systems, this typically involves tracing over the environment leaving only the system of interest. In QuTiP the class method :func:`qutip.Qobj.ptrace` is used to take partial traces. :func:`qutip.Qobj.ptrace` acts on the :class:`qutip.Qobj` instance for which it is called, and it takes one argument ``sel``, which is a ``list`` of integers that mark the component systems that should be **kept**. All other components are traced out. +The partial trace is an operation that reduces the dimension of a Hilbert space by eliminating some degrees of freedom by averaging (tracing). +In this sense it is therefore the converse of the tensor product. +It is useful when one is interested in only a part of a coupled quantum system. +For open quantum systems, this typically involves tracing over the environment leaving only the system of interest. +In QuTiP the class method :meth:`~qutip.core.qobj.Qobj.ptrace` is used to take partial traces. :meth:`~qutip.core.qobj.Qobj.ptrace` acts on the :class:`~qutip.core.qobj.Qobj` instance for which it is called, and it takes one argument ``sel``, which is a ``list`` of integers that mark the component systems that should be **kept**. +All other components are traced out. For example, the density matrix describing a single qubit obtained from a coupled two-qubit system is obtained via: @@ -374,8 +292,8 @@ using the isomorphism To represent superoperators acting on :math:`\mathcal{L}(\mathcal{H}_1 \otimes \mathcal{H}_2)` thus takes some tensor rearrangement to get the desired ordering :math:`\mathcal{H}_1 \otimes \mathcal{H}_2 \otimes \mathcal{H}_1 \otimes \mathcal{H}_2`. 
-In particular, this means that :func:`qutip.tensor` does not act as -one might expect on the results of :func:`qutip.superop_reps.to_super`: +In particular, this means that :func:`.tensor` does not act as +one might expect on the results of :func:`.to_super`: .. doctest:: [tensor] @@ -394,8 +312,8 @@ of the compound index with dims ``[2, 3]``. In the latter case, however, each of the Hilbert space indices is listed independently and in the wrong order. -The :func:`qutip.tensor.super_tensor` function performs the needed -rearrangement, providing the most direct analog to :func:`qutip.tensor` on +The :func:`.super_tensor` function performs the needed +rearrangement, providing the most direct analog to :func:`.tensor` on the underlying Hilbert space. In particular, for any two ``type="oper"`` Qobjs ``A`` and ``B``, ``to_super(tensor(A, B)) == super_tensor(to_super(A), to_super(B))`` and ``operator_to_vector(tensor(A, B)) == super_tensor(operator_to_vector(A), operator_to_vector(B))``. Returning to the previous example: @@ -405,8 +323,8 @@ Qobjs ``A`` and ``B``, ``to_super(tensor(A, B)) == super_tensor(to_super(A), to_ >>> super_tensor(to_super(A), to_super(B)).dims [[[2, 3], [2, 3]], [[2, 3], [2, 3]]] -The :func:`qutip.tensor.composite` function automatically switches between -:func:`qutip.tensor` and :func:`qutip.tensor.super_tensor` based on the ``type`` +The :func:`.composite` function automatically switches between +:func:`.tensor` and :func:`.super_tensor` based on the ``type`` of its arguments, such that ``composite(A, B)`` returns an appropriate Qobj to represent the composition of two systems. diff --git a/doc/guide/guide-visualization.rst b/doc/guide/guide-visualization.rst index 2b91b30ebb..c80ffc6973 100644 --- a/doc/guide/guide-visualization.rst +++ b/doc/guide/guide-visualization.rst @@ -282,7 +282,7 @@ structure and relative importance of various elements. QuTiP offers a few functions for quickly visualizing matrix data in the form of histograms, :func:`qutip.visualization.matrix_histogram` and as Hinton diagram of weighted squares, :func:`qutip.visualization.hinton`. -These functions takes a :class:`qutip.Qobj` as first argument, and optional arguments to, +These functions takes a :class:`.Qobj` as first argument, and optional arguments to, for example, set the axis labels and figure title (see the function's documentation for details). @@ -390,7 +390,8 @@ Note that to obtain :math:`\chi` with this method we have to construct a matrix Implementation in QuTiP ----------------------- -In QuTiP, the procedure described above is implemented in the function :func:`qutip.tomography.qpt`, which returns the :math:`\chi` matrix given a density matrix propagator. To illustrate how to use this function, let's consider the SWAP gate for two qubits. In QuTiP the function :func:`qutip.core.operators.swap` generates the unitary transformation for the state kets: +In QuTiP, the procedure described above is implemented in the function :func:`qutip.tomography.qpt`, which returns the :math:`\chi` matrix given a density matrix propagator. +To illustrate how to use this function, let's consider the SWAP gate for two qubits. In QuTiP the function :func:`.swap` generates the unitary transformation for the state kets: .. 
plot:: @@ -430,4 +431,4 @@ We are now ready to compute :math:`\chi` using :func:`qutip.tomography.qpt`, and -For a slightly more advanced example, where the density matrix propagator is calculated from the dynamics of a system defined by its Hamiltonian and collapse operators using the function :func:`qutip.propagator.propagator`, see notebook "Time-dependent master equation: Landau-Zener transitions" on the tutorials section on the QuTiP web site. +For a slightly more advanced example, where the density matrix propagator is calculated from the dynamics of a system defined by its Hamiltonian and collapse operators using the function :func:`.propagator`, see notebook "Time-dependent master equation: Landau-Zener transitions" on the tutorials section on the QuTiP web site. diff --git a/doc/guide/guide.rst b/doc/guide/guide.rst index 820a82fa07..e450ca737e 100644 --- a/doc/guide/guide.rst +++ b/doc/guide/guide.rst @@ -17,11 +17,10 @@ Users Guide guide-steady.rst guide-piqs.rst guide-correlation.rst - guide-control.rst guide-bloch.rst guide-visualization.rst - guide-parfor.rst guide-saving.rst guide-random.rst guide-settings.rst guide-measurement.rst + guide-control.rst diff --git a/doc/guide/heom/intro.rst b/doc/guide/heom/intro.rst index 5ae195250c..fd3c2ea624 100644 --- a/doc/guide/heom/intro.rst +++ b/doc/guide/heom/intro.rst @@ -40,4 +40,4 @@ In addition to support for bosonic environments, QuTiP also provides support for feriomic environments which is described in :doc:`fermionic`. Both bosonic and fermionic environments are supported via a single solver, -:class:`~qutip.nonmarkov.heom.HEOMSolver`, that supports solving for both dynamics and steady-states. +:class:`.HEOMSolver`, that supports solving for both dynamics and steady-states. diff --git a/doc/requirements.txt b/doc/requirements.txt index aa58f0e06e..6cb3ffacf5 100644 --- a/doc/requirements.txt +++ b/doc/requirements.txt @@ -21,7 +21,7 @@ packaging==23.0 parso==0.8.3 pexpect==4.8.0 pickleshare==0.7.5 -Pillow==10.0.1 +Pillow==10.2.0 prompt-toolkit==3.0.38 ptyprocess==0.7.0 Pygments==2.15.0 diff --git a/doc/rtd-environment.yml b/doc/rtd-environment.yml index dd4e9f26ca..7cafb0adc0 100644 --- a/doc/rtd-environment.yml +++ b/doc/rtd-environment.yml @@ -8,7 +8,7 @@ dependencies: - certifi==2022.12.7 - chardet==4.0.0 - cycler==0.10.0 -- Cython==0.29.33 +- Cython==3.0.8 - decorator==5.1.1 - docutils==0.18.1 - idna==3.4 @@ -19,7 +19,7 @@ dependencies: - kiwisolver==1.4.4 - MarkupSafe==2.1.2 - matplotlib==3.7.1 -- numpy==1.24.2 +- numpy==1.25.2 - numpydoc==1.5.0 - packaging==23.0 - parso==0.8.3 @@ -33,7 +33,7 @@ dependencies: - python-dateutil==2.8.2 - pytz==2023.3 - requests==2.28.2 -- scipy==1.10.1 +- scipy==1.11.4 - six==1.16.0 - snowballstemmer==2.2.0 - Sphinx==6.1.3 diff --git a/pyproject.toml b/pyproject.toml index 230bbad6b3..896cd3f115 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -8,7 +8,7 @@ requires = [ # See https://numpy.org/doc/stable/user/depending_on_numpy.html for # the recommended way to build against numpy's C API: "oldest-supported-numpy", - "scipy>=1.0", + "scipy>=1.8", ] build-backend = "setuptools.build_meta" @@ -18,12 +18,6 @@ manylinux-i686-image = "manylinux2014" # Change in future version to "build" build-frontend = "pip" -[[tool.cibuildwheel.overrides]] -# NumPy and SciPy support manylinux2010 on CPython 3.6 and 3.7 -select = "cp3{6,7}-*" -manylinux-x86_64-image = "manylinux2010" -manylinux-i686-image = "manylinux2010" - [tool.towncrier] directory = "doc/changes" filename = "doc/changelog.rst" diff 
--git a/qutip/__init__.py b/qutip/__init__.py index ff8d76a7e8..19f373c8b1 100644 --- a/qutip/__init__.py +++ b/qutip/__init__.py @@ -39,7 +39,6 @@ from .bloch import * from .visualization import * from .animation import * -from .bloch3d import * from .matplotlib_utilities import * # library functions diff --git a/qutip/bloch.py b/qutip/bloch.py index 0978ad48bd..5a918ca311 100644 --- a/qutip/bloch.py +++ b/qutip/bloch.py @@ -169,8 +169,10 @@ def __init__(self, fig=None, axes=None, view=None, figsize=None, # ---point options--- # List of colors for Bloch point markers, default = ['b','g','r','y'] self.point_default_color = ['b', 'r', 'g', '#CC6600'] + # Old variable used in V4 to customise the color of the points + self.point_color = None # List that stores the display colors for each set of points - self.point_color = [] + self._inner_point_color = [] # Size of point markers, default = 25 self.point_size = [25, 32, 35, 45] # Shape of point markers, default = ['o','^','d','s'] @@ -360,7 +362,7 @@ def add_points(self, points, meth='s', colors=None, alpha=1.0): self.point_style.append(meth) self.points.append(points) self.point_alpha.append(alpha) - self.point_color.append(colors) + self._inner_point_color.append(colors) def add_states(self, state, kind='vector', colors=None, alpha=1.0): """Add a state vector Qobj to Bloch sphere. @@ -792,19 +794,21 @@ def plot_points(self): dist = np.linalg.norm(points, axis=0) if not np.allclose(dist, dist[0], rtol=1e-12): indperm = np.argsort(dist) - points = points[:, indperm] else: indperm = np.arange(num_points) s = self.point_size[np.mod(k, len(self.point_size))] marker = self.point_marker[np.mod(k, len(self.point_marker))] style = self.point_style[k] - if self.point_color[k] is not None: - color = self.point_color[k] + + if self._inner_point_color[k] is not None: + color = self._inner_point_color[k] + elif self.point_color is not None: + color = self.point_color elif self.point_style[k] in ['s', 'l']: - color = self.point_default_color[ + color = [self.point_default_color[ k % len(self.point_default_color) - ] + ]] elif self.point_style[k] == 'm': length = np.ceil(num_points/len(self.point_default_color)) color = np.tile(self.point_default_color, length.astype(int)) @@ -812,9 +816,9 @@ def plot_points(self): color = list(color) if self.point_style[k] in ['s', 'm']: - self.axes.scatter(np.real(points[1]), - -np.real(points[0]), - np.real(points[2]), + self.axes.scatter(np.real(points[1][indperm]), + -np.real(points[0][indperm]), + np.real(points[2][indperm]), s=s, marker=marker, color=color, @@ -824,6 +828,7 @@ def plot_points(self): ) elif self.point_style[k] == 'l': + color = color[k % len(color)] self.axes.plot(np.real(points[1]), -np.real(points[0]), np.real(points[2]), diff --git a/qutip/bloch3d.py b/qutip/bloch3d.py deleted file mode 100644 index 160b9662e1..0000000000 --- a/qutip/bloch3d.py +++ /dev/null @@ -1,516 +0,0 @@ -__all__ = ['Bloch3d'] - -import numpy as np -from . import Qobj, expect, sigmax, sigmay, sigmaz - - -class Bloch3d: - """Class for plotting data on a 3D Bloch sphere using mayavi. - Valid data can be either points, vectors, or qobj objects - corresponding to state vectors or density matrices. for - a two-state system (or subsystem). - - Attributes - ---------- - fig : instance {None} - User supplied Matplotlib Figure instance for plotting Bloch sphere. - font_color : str {'black'} - Color of font used for Bloch sphere labels. - font_scale : float {0.08} - Scale for font used for Bloch sphere labels. 
- frame : bool {True} - Draw frame for Bloch sphere - frame_alpha : float {0.05} - Sets transparency of Bloch sphere frame. - frame_color : str {'gray'} - Color of sphere wireframe. - frame_num : int {8} - Number of frame elements to draw. - frame_radius : floats {0.005} - Width of wireframe. - point_color : list {['r', 'g', 'b', 'y']} - List of colors for Bloch sphere point markers to cycle through. - i.e. By default, points 0 and 4 will both be blue ('r'). - point_mode : string {'sphere','cone','cube','cylinder','point'} - Point marker shapes. - point_size : float {0.075} - Size of points on Bloch sphere. - sphere_alpha : float {0.1} - Transparency of Bloch sphere itself. - sphere_color : str {'#808080'} - Color of Bloch sphere. - size : list {[500, 500]} - Size of Bloch sphere plot in pixels. Best to have both numbers the same - otherwise you will have a Bloch sphere that looks like a football. - vector_color : list {['r', 'g', 'b', 'y']} - List of vector colors to cycle through. - vector_width : int {3} - Width of displayed vectors. - view : list {[45,65]} - Azimuthal and Elevation viewing angles. - xlabel : list {['\|x>', '']} - List of strings corresponding to +x and -x axes labels, respectively. - xlpos : list {[1.07,-1.07]} - Positions of +x and -x labels respectively. - ylabel : list {['\|y>', '']} - List of strings corresponding to +y and -y axes labels, respectively. - ylpos : list {[1.07,-1.07]} - Positions of +y and -y labels respectively. - zlabel : list {["\|0>", '\|1>']} - List of strings corresponding to +z and -z axes labels, respectively. - zlpos : list {[1.07,-1.07]} - Positions of +z and -z labels respectively. - - Notes - ----- - The use of mayavi for 3D rendering of the Bloch sphere comes with - a few limitations: I) You can not embed a Bloch3d figure into a - matplotlib window. II) The use of LaTex is not supported by the - mayavi rendering engine. Therefore all labels must be defined using - standard text. Of course you can post-process the generated figures - later to add LaTeX using other software if needed. - - """ - def __init__(self, fig=None): - # ----check for mayavi----- - try: - from mayavi import mlab - except: - raise Exception("This function requires the mayavi module.") - - # ---Image options--- - self.fig = None - self.user_fig = None - # check if user specified figure or axes. - if fig: - self.user_fig = fig - # The size of the figure in inches, default = [500,500]. - self.size = [500, 500] - # Azimuthal and Elvation viewing angles, default = [45,65]. - self.view = [45, 65] - # Image background color - self.bgcolor = 'white' - # Image foreground color. Other options can override. - self.fgcolor = 'black' - - # ---Sphere options--- - # Color of Bloch sphere, default = #808080 - self.sphere_color = '#808080' - # Transparency of Bloch sphere, default = 0.1 - self.sphere_alpha = 0.1 - - # ---Frame options--- - # Draw frame? 
- self.frame = True - # number of lines to draw for frame - self.frame_num = 8 - # Color of wireframe, default = 'gray' - self.frame_color = 'black' - # Transparency of wireframe, default = 0.2 - self.frame_alpha = 0.05 - # Radius of frame lines - self.frame_radius = 0.005 - - # --Axes--- - # Axes color - self.axes_color = 'black' - # Transparency of axes - self.axes_alpha = 0.4 - # Radius of axes lines - self.axes_radius = 0.005 - - # ---Labels--- - # Labels for x-axis (in LaTex), default = ['$x$',''] - self.xlabel = ['|x>', ''] - # Position of x-axis labels, default = [1.2,-1.2] - self.xlpos = [1.07, -1.07] - # Labels for y-axis (in LaTex), default = ['$y$',''] - self.ylabel = ['|y>', ''] - # Position of y-axis labels, default = [1.1,-1.1] - self.ylpos = [1.07, -1.07] - # Labels for z-axis - self.zlabel = ['|0>', '|1>'] - # Position of z-axis labels, default = [1.05,-1.05] - self.zlpos = [1.07, -1.07] - - # ---Font options--- - # Color of fonts, default = 'black' - self.font_color = 'black' - # Size of fonts, default = 20 - self.font_scale = 0.08 - - # ---Vector options--- - # Object used for representing vectors on Bloch sphere. - # List of colors for Bloch vectors, default = ['b','g','r','y'] - self.vector_color = ['r', 'g', 'b', 'y'] - # Width of Bloch vectors, default = 2 - self.vector_width = 2.0 - # Height of vector head - self.vector_head_height = 0.15 - # Radius of vector head - self.vector_head_radius = 0.075 - - # ---Point options--- - # List of colors for Bloch point markers, default = ['b','g','r','y'] - self.point_color = ['r', 'g', 'b', 'y'] - # Size of point markers - self.point_size = 0.06 - # Shape of point markers - # Options: 'cone' or 'cube' or 'cylinder' or 'point' or 'sphere'. - # Default = 'sphere' - self.point_mode = 'sphere' - - # ---Data lists--- - # Data for point markers - self.points = [] - # Data for Bloch vectors - self.vectors = [] - # Number of times sphere has been saved - self.savenum = 0 - # Style of points, 'm' for multiple colors, 's' for single color - self.point_style = [] - # Transparency of points - self.point_alpha = [] - # Transparency of vectors - self.vector_alpha = [] - - def __str__(self): - s = "" - s += "Bloch3D data:\n" - s += "-----------\n" - s += "Number of points: " + str(len(self.points)) + "\n" - s += "Number of vectors: " + str(len(self.vectors)) + "\n" - s += "\n" - s += "Bloch3D sphere properties:\n" - s += "--------------------------\n" - s += "axes_alpha: " + str(self.axes_alpha) + "\n" - s += "axes_color: " + str(self.axes_color) + "\n" - s += "axes_radius: " + str(self.axes_radius) + "\n" - s += "bgcolor: " + str(self.bgcolor) + "\n" - s += "fgcolor: " + str(self.fgcolor) + "\n" - s += "font_color: " + str(self.font_color) + "\n" - s += "font_scale: " + str(self.font_scale) + "\n" - s += "frame: " + str(self.frame) + "\n" - s += "frame_alpha: " + str(self.frame_alpha) + "\n" - s += "frame_color: " + str(self.frame_color) + "\n" - s += "frame_num: " + str(self.frame_num) + "\n" - s += "frame_radius: " + str(self.frame_radius) + "\n" - s += "point_color: " + str(self.point_color) + "\n" - s += "point_mode: " + str(self.point_mode) + "\n" - s += "point_size: " + str(self.point_size) + "\n" - s += "sphere_alpha: " + str(self.sphere_alpha) + "\n" - s += "sphere_color: " + str(self.sphere_color) + "\n" - s += "size: " + str(self.size) + "\n" - s += "vector_color: " + str(self.vector_color) + "\n" - s += "vector_width: " + str(self.vector_width) + "\n" - s += "vector_head_height: " + str(self.vector_head_height) + "\n" - s += 
"vector_head_radius: " + str(self.vector_head_radius) + "\n" - s += "view: " + str(self.view) + "\n" - s += "xlabel: " + str(self.xlabel) + "\n" - s += "xlpos: " + str(self.xlpos) + "\n" - s += "ylabel: " + str(self.ylabel) + "\n" - s += "ylpos: " + str(self.ylpos) + "\n" - s += "zlabel: " + str(self.zlabel) + "\n" - s += "zlpos: " + str(self.zlpos) + "\n" - return s - - def clear(self): - """Resets the Bloch sphere data sets to empty. - """ - self.points = [] - self.vectors = [] - self.point_style = [] - - def add_points(self, points, meth='s', alpha=1.0): - """Add a list of data points to bloch sphere. - - Parameters - ---------- - points : array/list - Collection of data points. - - meth : str {'s','m'} - Type of points to plot, use 'm' for multicolored. - - alpha : float, default=1. - Transparency value for the vectors. Values between 0 and 1. - - """ - if not isinstance(points[0], (list, np.ndarray)): - points = [[points[0]], [points[1]], [points[2]]] - points = np.array(points) - if meth == 's': - if len(points[0]) == 1: - pnts = np.array( - [[points[0][0]], [points[1][0]], [points[2][0]]]) - pnts = np.append(pnts, points, axis=1) - else: - pnts = points - self.points.append(pnts) - self.point_style.append('s') - else: - self.points.append(points) - self.point_style.append('m') - self.point_alpha.append(alpha) - - def add_states(self, state, kind='vector', alpha=1.0): - """Add a state vector Qobj to Bloch sphere. - - Parameters - ---------- - state : qobj - Input state vector. - - kind : str {'vector','point'} - Type of object to plot. - - alpha : float, default=1. - Transparency value for the vectors. Values between 0 and 1. - """ - if isinstance(state, Qobj): - state = [state] - for st in state: - if kind == 'vector': - vec = [expect(sigmax(), st), expect(sigmay(), st), - expect(sigmaz(), st)] - self.add_vectors(vec, alpha=alpha) - elif kind == 'point': - pnt = [expect(sigmax(), st), expect(sigmay(), st), - expect(sigmaz(), st)] - self.add_points(pnt, alpha=alpha) - - def add_vectors(self, vectors, alpha=1.0): - """Add a list of vectors to Bloch sphere. - - Parameters - ---------- - vectors : array/list - Array with vectors of unit length or smaller. - - alpha : float, default=1. - Transparency value for the vectors. Values between 0 and 1. - - """ - if isinstance(vectors[0], (list, np.ndarray)): - for vec in vectors: - self.vectors.append(vec) - self.vector_alpha.append(alpha) - else: - self.vectors.append(vectors) - self.vector_alpha.append(alpha) - - def plot_vectors(self): - """ - Plots vectors on the Bloch sphere. 
- """ - from mayavi import mlab - from tvtk.api import tvtk - import matplotlib.colors as colors - ii = 0 - for k in range(len(self.vectors)): - vec = np.array(self.vectors[k]) - norm = np.linalg.norm(vec) - theta = np.arccos(vec[2] / norm) - phi = np.arctan2(vec[1], vec[0]) - vec -= 0.5 * self.vector_head_height * \ - np.array([np.sin(theta) * np.cos(phi), - np.sin(theta) * np.sin(phi), np.cos(theta)]) - - color = colors.colorConverter.to_rgb( - self.vector_color[np.mod(k, len(self.vector_color))]) - - mlab.plot3d([0, vec[0]], [0, vec[1]], [0, vec[2]], - name='vector' + str(ii), tube_sides=100, - line_width=self.vector_width, - opacity=self.vector_alpha[k], - color=color) - - cone = tvtk.ConeSource(height=self.vector_head_height, - radius=self.vector_head_radius, - resolution=100) - cone_mapper = tvtk.PolyDataMapper( - input_connection=cone.output_port) - prop = tvtk.Property(opacity=self.vector_alpha[k], color=color) - cc = tvtk.Actor(mapper=cone_mapper, property=prop) - cc.rotate_z(np.degrees(phi)) - cc.rotate_y(-90 + np.degrees(theta)) - cc.position = vec - self.fig.scene.add_actor(cc) - - def plot_points(self): - """ - Plots points on the Bloch sphere. - """ - from mayavi import mlab - import matplotlib.colors as colors - for k in range(len(self.points)): - num = len(self.points[k][0]) - dist = [np.sqrt(self.points[k][0][j] ** 2 + - self.points[k][1][j] ** 2 + - self.points[k][2][j] ** 2) for j in range(num)] - if any(abs(dist - dist[0]) / dist[0] > 1e-12): - # combine arrays so that they can be sorted together - # and sort rates from lowest to highest - zipped = sorted(zip(dist, range(num))) - dist, indperm = zip(*zipped) - indperm = np.array(indperm) - else: - indperm = range(num) - if self.point_style[k] == 's': - color = colors.colorConverter.to_rgb( - self.point_color[np.mod(k, len(self.point_color))]) - mlab.points3d( - self.points[k][0][indperm], self.points[k][1][indperm], - self.points[k][2][indperm], figure=self.fig, - resolution=100, scale_factor=self.point_size, - mode=self.point_mode, color=color, - opacity=self.point_alpha[k]) - - elif self.point_style[k] == 'm': - pnt_colors = np.array(self.point_color * np.ceil( - num / float(len(self.point_color)))) - pnt_colors = pnt_colors[0:num] - pnt_colors = list(pnt_colors[indperm]) - for kk in range(num): - mlab.points3d( - self.points[k][0][ - indperm[kk]], self.points[k][1][indperm[kk]], - self.points[k][2][ - indperm[kk]], figure=self.fig, resolution=100, - scale_factor=self.point_size, mode=self.point_mode, - color=colors.colorConverter.to_rgb(pnt_colors[kk]), - opacity=self.point_alpha[k]) - - def make_sphere(self): - """ - Plots Bloch sphere and data sets. 
- """ - # setup plot - # Figure instance for Bloch sphere plot - from mayavi import mlab - import matplotlib.colors as colors - if self.user_fig: - self.fig = self.user_fig - else: - self.fig = mlab.figure( - 1, size=self.size, - bgcolor=colors.colorConverter.to_rgb(self.bgcolor), - fgcolor=colors.colorConverter.to_rgb(self.fgcolor)) - - sphere = mlab.points3d( - 0, 0, 0, figure=self.fig, scale_mode='none', scale_factor=2, - color=colors.colorConverter.to_rgb(self.sphere_color), - resolution=100, opacity=self.sphere_alpha, name='bloch_sphere') - - # Thse commands make the sphere look better - sphere.actor.property.specular = 0.45 - sphere.actor.property.specular_power = 5 - sphere.actor.property.backface_culling = True - - # make frame for sphere surface - if self.frame: - theta = np.linspace(0, 2 * np.pi, 100) - for angle in np.linspace(-np.pi, np.pi, self.frame_num): - xlat = np.cos(theta) * np.cos(angle) - ylat = np.sin(theta) * np.cos(angle) - zlat = np.ones_like(theta) * np.sin(angle) - xlon = np.sin(angle) * np.sin(theta) - ylon = np.cos(angle) * np.sin(theta) - zlon = np.cos(theta) - mlab.plot3d( - xlat, ylat, zlat, - color=colors.colorConverter.to_rgb(self.frame_color), - opacity=self.frame_alpha, tube_radius=self.frame_radius) - mlab.plot3d( - xlon, ylon, zlon, - color=colors.colorConverter.to_rgb(self.frame_color), - opacity=self.frame_alpha, tube_radius=self.frame_radius) - - # add axes - axis = np.linspace(-1.0, 1.0, 10) - other = np.zeros_like(axis) - mlab.plot3d( - axis, other, other, - color=colors.colorConverter.to_rgb(self.axes_color), - tube_radius=self.axes_radius, opacity=self.axes_alpha) - mlab.plot3d( - other, axis, other, - color=colors.colorConverter.to_rgb(self.axes_color), - tube_radius=self.axes_radius, opacity=self.axes_alpha) - mlab.plot3d( - other, other, axis, - color=colors.colorConverter.to_rgb(self.axes_color), - tube_radius=self.axes_radius, opacity=self.axes_alpha) - - # add data to sphere - self.plot_points() - self.plot_vectors() - - # #add labels - mlab.text3d(0, 0, self.zlpos[0], self.zlabel[0], - color=colors.colorConverter.to_rgb(self.font_color), - scale=self.font_scale) - mlab.text3d(0, 0, self.zlpos[1], self.zlabel[1], - color=colors.colorConverter.to_rgb(self.font_color), - scale=self.font_scale) - mlab.text3d(self.xlpos[0], 0, 0, self.xlabel[0], - color=colors.colorConverter.to_rgb(self.font_color), - scale=self.font_scale) - mlab.text3d(self.xlpos[1], 0, 0, self.xlabel[1], - color=colors.colorConverter.to_rgb(self.font_color), - scale=self.font_scale) - mlab.text3d(0, self.ylpos[0], 0, self.ylabel[0], - color=colors.colorConverter.to_rgb(self.font_color), - scale=self.font_scale) - mlab.text3d(0, self.ylpos[1], 0, self.ylabel[1], - color=colors.colorConverter.to_rgb(self.font_color), - scale=self.font_scale) - - def show(self): - """ - Display the Bloch sphere and corresponding data sets. - """ - from mayavi import mlab - self.make_sphere() - mlab.view(azimuth=self.view[0], elevation=self.view[1], distance=5) - if self.fig: - mlab.show() - - def save(self, name=None, format='png', dirc=None): - """Saves Bloch sphere to file of type ``format`` in directory ``dirc``. - - Parameters - ---------- - name : str - Name of saved image. Must include path and format as well. - i.e. '/Users/Me/Desktop/bloch.png' - This overrides the 'format' and 'dirc' arguments. - format : str - Format of output image. Default is 'png'. - dirc : str - Directory for output images. Defaults to current working directory. 
- - Returns - ------- - File containing plot of Bloch sphere. - - """ - from mayavi import mlab - import os - self.make_sphere() - mlab.view(azimuth=self.view[0], elevation=self.view[1], distance=5) - if dirc: - if not os.path.isdir(os.getcwd() + "/" + str(dirc)): - os.makedirs(os.getcwd() + "/" + str(dirc)) - if name is None: - if dirc: - mlab.savefig(os.getcwd() + "/" + str(dirc) + '/bloch_' + - str(self.savenum) + '.' + format) - else: - mlab.savefig(os.getcwd() + '/bloch_' + str(self.savenum) + - '.' + format) - else: - mlab.savefig(name) - self.savenum += 1 - if self.fig: - mlab.close(self.fig) diff --git a/qutip/core/cy/_element.pyx b/qutip/core/cy/_element.pyx index cfe3154742..d77f3e6dfd 100644 --- a/qutip/core/cy/_element.pyx +++ b/qutip/core/cy/_element.pyx @@ -267,6 +267,10 @@ cdef class _BaseElement: return new.qobj(t) * new.coeff(t) return self.qobj(t) * self.coeff(t) + @property + def dtype(self): + return None + cdef class _ConstantElement(_BaseElement): """ @@ -313,6 +317,10 @@ cdef class _ConstantElement(_BaseElement): def __call__(self, t, args=None): return self._qobj + @property + def dtype(self): + return type(self._data) + cdef class _EvoElement(_BaseElement): """ @@ -370,6 +378,10 @@ cdef class _EvoElement(_BaseElement): self._coefficient.replace_arguments(args) ) + @property + def dtype(self): + return type(self._data) + cdef class _FuncElement(_BaseElement): """ diff --git a/qutip/core/cy/qobjevo.pyx b/qutip/core/cy/qobjevo.pyx index 67c5195c44..f508189c31 100644 --- a/qutip/core/cy/qobjevo.pyx +++ b/qutip/core/cy/qobjevo.pyx @@ -908,12 +908,12 @@ cdef class QobjEvo: @property def isoper(self): """Indicates if the system represents an operator.""" - return self._dims.type == "oper" + return self._dims.type in ['oper', 'scalar'] @property def issuper(self): """Indicates if the system represents a superoperator.""" - return self._dims.issuper + return self._dims.type == 'super' @property def dims(self): @@ -927,6 +927,41 @@ cdef class QobjEvo: def superrep(self): return self._dims.superrep + @property + def isbra(self): + """Indicates if the system represents a bra state.""" + return self._dims.type in ['bra', 'scalar'] + + @property + def isket(self): + """Indicates if the system represents a ket state.""" + return self._dims.type in ['ket', 'scalar'] + + @property + def isoperket(self): + """Indicates if the system represents a operator-ket state.""" + return self._dims.type == 'operator-ket' + + @property + def isoperbra(self): + """Indicates if the system represents a operator-bra state.""" + return self._dims.type == 'operator-bra' + + @property + def dtype(self): + """ + Type of the data layers of the QobjEvo. + When different data layers are used, we return the type of the sum of + the parts. + """ + part_types = [part.dtype for part in self.elements] + if ( + part_types[0] is not None + and all(part == part_types[0] for part in part_types) + ): + return part_types[0] + return self(0).dtype + ########################################################################### # operation methods # ########################################################################### diff --git a/qutip/core/data/constant.py b/qutip/core/data/constant.py index 8061bb8832..d6264c5f89 100644 --- a/qutip/core/data/constant.py +++ b/qutip/core/data/constant.py @@ -96,7 +96,9 @@ def identity_like_data(data, /): Create an identity matrix of the same type and shape. 
""" if not data.shape[0] == data.shape[1]: - raise ValueError("Can't create and identity like a non square matrix.") + raise ValueError( + "Can't create an identity matrix like a non square matrix." + ) return identity[type(data)](data.shape[0]) @@ -105,7 +107,9 @@ def identity_like_dense(data, /): Create an identity matrix of the same type and shape. """ if not data.shape[0] == data.shape[1]: - raise ValueError("Can't create and identity like a non square matrix.") + raise ValueError( + "Can't create an identity matrix like a non square matrix." + ) return dense.identity(data.shape[0], fortran=data.fortran) diff --git a/qutip/core/data/csr.pyx b/qutip/core/data/csr.pyx index c7eed323c2..5bbce551cc 100644 --- a/qutip/core/data/csr.pyx +++ b/qutip/core/data/csr.pyx @@ -530,9 +530,21 @@ cpdef CSR empty(base.idxint rows, base.idxint cols, base.idxint size): PyDataMem_NEW(size * sizeof(base.idxint)) out.row_index =\ PyDataMem_NEW(row_size * sizeof(base.idxint)) - if not out.data: raise MemoryError() - if not out.col_index: raise MemoryError() - if not out.row_index: raise MemoryError() + if not out.data: + raise MemoryError( + f"Failed to allocate the `data` of a ({rows}, {cols}) " + f"CSR array of {size} max elements." + ) + if not out.col_index: + raise MemoryError( + f"Failed to allocate the `col_index` of a ({rows}, {cols}) " + f"CSR array of {size} max elements." + ) + if not out.row_index: + raise MemoryError( + f"Failed to allocate the `row_index` of a ({rows}, {cols}) " + f"CSR array of {size} max elements." + ) # Set the number of non-zero elements to 0. out.row_index[rows] = 0 return out @@ -604,7 +616,10 @@ cdef CSR from_coo_pointers( data_tmp = mem.PyMem_Malloc(nnz * sizeof(double complex)) cols_tmp = mem.PyMem_Malloc(nnz * sizeof(base.idxint)) if data_tmp == NULL or cols_tmp == NULL: - raise MemoryError + raise MemoryError( + f"Failed to allocate the memory needed for a ({n_rows}, {n_cols}) " + f"CSR array with {nnz} elements." + ) with nogil: memset(out.row_index, 0, (n_rows + 1) * sizeof(base.idxint)) for ptr_in in range(nnz): diff --git a/qutip/core/data/dense.pyx b/qutip/core/data/dense.pyx index 1fc7f35670..0db9034fa0 100644 --- a/qutip/core/data/dense.pyx +++ b/qutip/core/data/dense.pyx @@ -120,7 +120,11 @@ cdef class Dense(base.Data): cdef Dense out = Dense.__new__(Dense) cdef size_t size = self.shape[0]*self.shape[1]*sizeof(double complex) cdef double complex *ptr = PyDataMem_NEW(size) - if not ptr: raise MemoryError() + if not ptr: + raise MemoryError( + "Could not allocate memory to copy a " + f"({self.shape[0]}, {self.shape[1]}) Dense matrix." + ) memcpy(ptr, self.data, size) out.shape = self.shape out.data = ptr @@ -163,7 +167,11 @@ cdef class Dense(base.Data): """ cdef size_t size = self.shape[0]*self.shape[1]*sizeof(double complex) cdef double complex *ptr = PyDataMem_NEW(size) - if not ptr: raise MemoryError() + if not ptr: + raise MemoryError( + "Could not allocate memory to convert to a numpy array a " + f"({self.shape[0]}, {self.shape[1]}) Dense matrix." + ) memcpy(ptr, self.data, size) cdef object out =\ cnp.PyArray_SimpleNewFromData(2, [self.shape[0], self.shape[1]], @@ -246,7 +254,11 @@ cpdef Dense empty(base.idxint rows, base.idxint cols, bint fortran=True): cdef Dense out = Dense.__new__(Dense) out.shape = (rows, cols) out.data = PyDataMem_NEW(rows * cols * sizeof(double complex)) - if not out.data: raise MemoryError() + if not out.data: + raise MemoryError( + "Could not allocate memory to create an empty " + f"({rows}, {cols}) Dense matrix." 
+ ) out._deallocate = True out.fortran = fortran return out @@ -267,7 +279,11 @@ cpdef Dense zeros(base.idxint rows, base.idxint cols, bint fortran=True): out.shape = (rows, cols) out.data =\ PyDataMem_NEW_ZEROED(rows * cols, sizeof(double complex)) - if not out.data: raise MemoryError() + if not out.data: + raise MemoryError( + "Could not allocate memory to create a zero " + f"({rows}, {cols}) Dense matrix." + ) out.fortran = fortran out._deallocate = True return out @@ -294,7 +310,11 @@ cpdef Dense from_csr(CSR matrix, bint fortran=False): PyDataMem_NEW_ZEROED(out.shape[0]*out.shape[1], sizeof(double complex)) ) - if not out.data: raise MemoryError() + if not out.data: + raise MemoryError( + "Could not allocate memory to create a " + f"({out.shape[0]}, {out.shape[1]}) Dense matrix from a CSR." + ) out.fortran = fortran out._deallocate = True cdef size_t row, ptr_in, ptr_out, row_stride, col_stride diff --git a/qutip/core/data/dia.pyx b/qutip/core/data/dia.pyx index 92d68b8f89..9414298402 100644 --- a/qutip/core/data/dia.pyx +++ b/qutip/core/data/dia.pyx @@ -271,8 +271,16 @@ cpdef Dia empty(base.idxint rows, base.idxint cols, base.idxint num_diag): PyDataMem_NEW(cols * num_diag * sizeof(double complex)) out.offsets =\ PyDataMem_NEW(num_diag * sizeof(base.idxint)) - if not out.data: raise MemoryError() - if not out.offsets: raise MemoryError() + if not out.data: + raise MemoryError( + f"Failed to allocate the `data` of a ({rows}, {cols}) " + f"Dia array of {num_diag} diagonals." + ) + if not out.offsets: + raise MemoryError( + f"Failed to allocate the `offsets` of a ({rows}, {cols}) " + f"Dia array of {num_diag} diagonals." + ) return out diff --git a/qutip/core/dimensions.py b/qutip/core/dimensions.py index 7e0f17cd57..7e7c4538d1 100644 --- a/qutip/core/dimensions.py +++ b/qutip/core/dimensions.py @@ -783,6 +783,25 @@ def __eq__(self, other): ) return NotImplemented + def __ne__(self, other): + if isinstance(other, Dimensions): + return not ( + self is other + or ( + self.to_ == other.to_ + and self.from_ == other.from_ + ) + ) + return NotImplemented + + def __matmul__(self, other): + if self.from_ != other.to_: + raise TypeError(f"incompatible dimensions {self} and {other}") + args = other.from_, self.to_ + if args in Dimensions._stored_dims: + return Dimensions._stored_dims[args] + return Dimensions(*args) + def __hash__(self): return hash((self.to_, self.from_)) diff --git a/qutip/core/operators.py b/qutip/core/operators.py index 2ef1f3ff03..608ecc7fc5 100644 --- a/qutip/core/operators.py +++ b/qutip/core/operators.py @@ -588,6 +588,7 @@ def _f_op(n_sites, site, action, dtype=None): oper : qobj Qobj for destruction operator. """ + dtype = dtype or settings.core["default_dtype"] or _data.CSR # get `tensor` and sigma z objects from .tensor import tensor s_z = 2 * jmat(0.5, 'z', dtype=dtype) @@ -614,7 +615,7 @@ def _f_op(n_sites, site, action, dtype=None): eye = identity(2, dtype=dtype) opers = [s_z] * site + [operator] + [eye] * (n_sites - site - 1) - return tensor(opers) + return tensor(opers).to(dtype) def _implicit_tensor_dimensions(dimensions): @@ -691,11 +692,9 @@ def qzero_like(qobj): Zero operator Qobj. """ - from .cy.qobjevo import QobjEvo - if isinstance(qobj, QobjEvo): - qobj = qobj(0) + return Qobj( - _data.zeros_like(qobj.data), dims=qobj._dims, + _data.zeros[qobj.dtype](*qobj.shape), dims=qobj._dims, isherm=True, isunitary=False, copy=False ) @@ -767,11 +766,12 @@ def qeye_like(qobj): Identity operator Qobj. 
""" - from .cy.qobjevo import QobjEvo - if isinstance(qobj, QobjEvo): - qobj = qobj(0) + if qobj.shape[0] != qobj.shape[1]: + raise ValueError( + "Can't create an identity matrix like a non square matrix." + ) return Qobj( - _data.identity_like(qobj.data), dims=qobj._dims, + _data.identity[qobj.dtype](qobj.shape[0]), dims=qobj._dims, isherm=True, isunitary=True, copy=False ) @@ -798,10 +798,11 @@ def position(N, offset=0, *, dtype=None): oper : qobj Position operator as Qobj. """ + dtype = dtype or settings.core["default_dtype"] or _data.Dia a = destroy(N, offset=offset, dtype=dtype) position = np.sqrt(0.5) * (a + a.dag()) position.isherm = True - return position + return position.to(dtype) def momentum(N, offset=0, *, dtype=None): @@ -826,10 +827,11 @@ def momentum(N, offset=0, *, dtype=None): oper : qobj Momentum operator as Qobj. """ + dtype = dtype or settings.core["default_dtype"] or _data.Dia a = destroy(N, offset=offset, dtype=dtype) momentum = -1j * np.sqrt(0.5) * (a - a.dag()) momentum.isherm = True - return momentum + return momentum.to(dtype) def num(N, offset=0, *, dtype=None): diff --git a/qutip/core/qobj.py b/qutip/core/qobj.py index d42e08b9bc..2b4dcda24f 100644 --- a/qutip/core/qobj.py +++ b/qutip/core/qobj.py @@ -81,13 +81,17 @@ def _require_equal_type(method): """ @functools.wraps(method) def out(self, other): + if isinstance(other, Qobj): + if self._dims != other._dims: + msg = ( + "incompatible dimensions " + + repr(self.dims) + " and " + repr(other.dims) + ) + raise ValueError(msg) + return method(self, other) if other == 0: return method(self, other) - if ( - self.type in ('oper', 'super') - and self._dims[0] == self._dims[1] - and isinstance(other, numbers.Number) - ): + if self._dims.issquare and isinstance(other, numbers.Number): scale = complex(other) other = Qobj(_data.identity(self.shape[0], scale, dtype=type(self.data)), @@ -95,18 +99,9 @@ def out(self, other): isherm=(scale.imag == 0), isunitary=(abs(abs(scale)-1) < settings.core['atol']), copy=False) - if not isinstance(other, Qobj): - try: - other = Qobj(other, dims=self._dims) - except TypeError: - return NotImplemented - if self._dims != other._dims: - msg = ( - "incompatible dimensions " - + repr(self.dims) + " and " + repr(other.dims) - ) - raise ValueError(msg) - return method(self, other) + return method(self, other) + return NotImplemented + return out @@ -335,6 +330,10 @@ def superrep(self, super_rep): def data(self): return self._data + @property + def dtype(self): + return type(self._data) + @data.setter def data(self, data): if not isinstance(data, _data.Data): @@ -377,12 +376,11 @@ def to(self, data_type): converter = _data.to[data_type] except (KeyError, TypeError): raise ValueError("Unknown conversion type: " + str(data_type)) - if type(self.data) is data_type: + if type(self._data) is data_type: return self return Qobj(converter(self._data), dims=self._dims, isherm=self._isherm, - superrep=self.superrep, isunitary=self._isunitary, copy=False) @@ -441,7 +439,6 @@ def __mul__(self, other): return Qobj(out, dims=self._dims, isherm=isherm, - superrep=self.superrep, isunitary=isunitary, copy=False) @@ -456,23 +453,16 @@ def __matmul__(self, other): other = Qobj(other) except TypeError: return NotImplemented - if self._dims[1] != other._dims[0]: - raise TypeError("".join([ - "incompatible dimensions ", - repr(self.dims), - " and ", - repr(other.dims), - ])) - if ( - (self.isbra and other.isket) - or (self.isoperbra and other.isoperket) - ): - return _data.inner(self.data, other.data) + new_dims = 
self._dims @ other._dims + if new_dims.type == 'scalar': + return _data.inner(self._data, other._data) - return Qobj(_data.matmul(self.data, other.data), - dims=Dimensions(other._dims[1], self._dims[0]), - isunitary=self._isunitary and other._isunitary, - copy=False) + return Qobj( + _data.matmul(self._data, other._data), + dims=new_dims, + isunitary=self._isunitary and other._isunitary, + copy=False + ) def __truediv__(self, other): return self.__mul__(1 / other) @@ -526,7 +516,7 @@ def __pow__(self, n, m=None): # calculates powers of Qobj def _str_header(self): out = ", ".join([ "Quantum object: dims=" + str(self.dims), - "shape=" + str(self.data.shape), + "shape=" + str(self._data.shape), "type=" + repr(self.type), ]) if self.type in ('oper', 'super'): @@ -701,7 +691,7 @@ def norm(self, norm=None, kwargs=None): "vector norm must be in " + repr(_NORM_ALLOWED_VECTOR) ) kwargs = kwargs or {} - return _NORM_FUNCTION_LOOKUP[norm](self.data, **kwargs) + return _NORM_FUNCTION_LOOKUP[norm](self._data, **kwargs) def proj(self): """Form the projector from a given ket or bra vector. @@ -755,8 +745,8 @@ def purity(self): if self.type in ("super", "operator-ket", "operator-bra"): raise TypeError('purity is only defined for states.') if self.isket or self.isbra: - return _data.norm.l2(self.data)**2 - return _data.trace(_data.matmul(self.data, self.data)).real + return _data.norm.l2(self._data)**2 + return _data.trace(_data.matmul(self._data, self._data)).real def full(self, order='C', squeeze=False): """Dense array from quantum object. @@ -794,7 +784,7 @@ def data_as(self, format=None, copy=True): data : numpy.ndarray, scipy.sparse.matrix_csr, etc. Matrix in the type of the underlying libraries. """ - return _data.extract(self.data, format, copy) + return _data.extract(self._data, format, copy) def diag(self): """Diagonal elements of quantum object. 
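Illustrative usage sketch for the hunks above (an assumption-laden example, not part of the patch: it assumes the QuTiP 5 `Qobj` API with the new `dtype` property and the dims-based `__matmul__` dispatch; the exact default data layer depends on `settings.core["default_dtype"]`):

    # Sketch only: exercises the new Qobj.dtype property and the
    # scalar-returning matmul path shown above. The printed data-layer
    # class may vary with the configured default dtype.
    import qutip

    ket = qutip.basis(2, 0)
    print(ket.dtype)               # the data-layer class, e.g. a Dense type

    bra = ket.dag()
    overlap = bra @ ket            # dims compose to 'scalar' -> a plain number
    assert not isinstance(overlap, qutip.Qobj)

    column = qutip.qeye(2) @ ket   # operator @ ket still returns a Qobj
    assert isinstance(column, qutip.Qobj)
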
@@ -1733,14 +1723,39 @@ def isunitary(self): return self._isunitary @property - def shape(self): return self.data.shape - - isbra = property(isbra) - isket = property(isket) - isoper = property(isoper) - issuper = property(issuper) - isoperbra = property(isoperbra) - isoperket = property(isoperket) + def shape(self): + """Return the shape of the Qobj data.""" + return self._data.shape + + @property + def isoper(self): + """Indicates if the Qobj represents an operator.""" + return self._dims.type in ['oper', 'scalar'] + + @property + def isbra(self): + """Indicates if the Qobj represents a bra state.""" + return self._dims.type in ['bra', 'scalar'] + + @property + def isket(self): + """Indicates if the Qobj represents a ket state.""" + return self._dims.type in ['ket', 'scalar'] + + @property + def issuper(self): + """Indicates if the Qobj represents a superoperator.""" + return self._dims.type == 'super' + + @property + def isoperket(self): + """Indicates if the Qobj represents a operator-ket state.""" + return self._dims.type == 'operator-ket' + + @property + def isoperbra(self): + """Indicates if the Qobj represents a operator-bra state.""" + return self._dims.type == 'operator-bra' def ptrace(Q, sel): diff --git a/qutip/core/states.py b/qutip/core/states.py index 79bc89529a..a5139b46f9 100644 --- a/qutip/core/states.py +++ b/qutip/core/states.py @@ -295,7 +295,9 @@ def coherent_dm(N, alpha, offset=0, method='operator', *, dtype=None): """ dtype = dtype or settings.core["default_dtype"] or _data.Dense - return coherent(N, alpha, offset=offset, method=method, dtype=dtype).proj() + return coherent( + N, alpha, offset=offset, method=method, dtype=dtype + ).proj().to(dtype) def fock_dm(dimensions, n=None, offset=None, *, dtype=None): @@ -340,7 +342,7 @@ def fock_dm(dimensions, n=None, offset=None, *, dtype=None): """ dtype = dtype or settings.core["default_dtype"] or _data.Dia - return basis(dimensions, n, offset=offset, dtype=dtype).proj() + return basis(dimensions, n, offset=offset, dtype=dtype).proj().to(dtype) def fock(dimensions, n=None, offset=None, *, dtype=None): @@ -550,8 +552,10 @@ def projection(N, n, m, offset=None, *, dtype=None): Requested projection operator. 
""" dtype = dtype or settings.core["default_dtype"] or _data.CSR - return basis(N, n, offset=offset, dtype=dtype) @ \ - basis(N, m, offset=offset, dtype=dtype).dag() + return ( + basis(N, n, offset=offset, dtype=dtype) @ \ + basis(N, m, offset=offset, dtype=dtype).dag() + ).to(dtype) def qstate(string, *, dtype=None): @@ -1154,10 +1158,15 @@ def triplet_states(*, dtype=None): trip_states : list 2 particle triplet states """ + dtype = dtype or settings.core["default_dtype"] or _data.Dense return [ basis([2, 2], [1, 1], dtype=dtype), - np.sqrt(0.5) * (basis([2, 2], [0, 1], dtype=dtype) + - basis([2, 2], [1, 0], dtype=dtype)), + ( + np.sqrt(0.5) * ( + basis([2, 2], [0, 1], dtype=dtype) + + basis([2, 2], [1, 0], dtype=dtype) + ) + ).to(dtype), basis([2, 2], [0, 0], dtype=dtype), ] @@ -1181,12 +1190,13 @@ def w_state(N=3, *, dtype=None): W : :obj:`.Qobj` N-qubit W-state """ + dtype = dtype or settings.core["default_dtype"] or _data.Dense inds = np.zeros(N, dtype=int) inds[0] = 1 state = basis([2]*N, list(inds), dtype=dtype) for kk in range(1, N): state += basis([2]*N, list(np.roll(inds, kk)), dtype=dtype) - return np.sqrt(1 / N) * state + return (np.sqrt(1 / N) * state).to(dtype) def ghz_state(N=3, *, dtype=None): @@ -1208,5 +1218,10 @@ def ghz_state(N=3, *, dtype=None): G : qobj N-qubit GHZ-state """ - return np.sqrt(0.5) * (basis([2]*N, [0]*N, dtype=dtype) + - basis([2]*N, [1]*N, dtype=dtype)) + dtype = dtype or settings.core["default_dtype"] or _data.Dense + return ( + np.sqrt(0.5) * ( + basis([2]*N, [0]*N, dtype=dtype) + + basis([2]*N, [1]*N, dtype=dtype) + ) + ).to(dtype) diff --git a/qutip/solver/heom/bofin_solvers.py b/qutip/solver/heom/bofin_solvers.py index bb49890be2..23b82f629f 100644 --- a/qutip/solver/heom/bofin_solvers.py +++ b/qutip/solver/heom/bofin_solvers.py @@ -385,7 +385,7 @@ def _post_init(self): self.store_ados = self.options["store_ados"] if self.store_ados: - self.final_ado_state = None + self._final_ado_state = None self.ado_states = [] def _e_op_func(self, e_op): @@ -407,9 +407,17 @@ def _store_state(self, t, ado_state): self.ado_states.append(ado_state) def _store_final_state(self, t, ado_state): - self.final_state = ado_state.rho + self._final_state = ado_state.rho if self.store_ados: - self.final_ado_state = ado_state + self._final_ado_state = ado_state + + @property + def final_ado_state(self): + if self._final_ado_state is not None: + return self._final_state + if self.ado_states: + return self.ado_states[-1] + return None def heomsolve( diff --git a/qutip/solver/mcsolve.py b/qutip/solver/mcsolve.py index a55b1a094c..66005e1080 100644 --- a/qutip/solver/mcsolve.py +++ b/qutip/solver/mcsolve.py @@ -62,8 +62,6 @@ def mcsolve(H, state, tlist, c_ops=(), e_ops=None, ntraj=500, *, | Whether or not to store the state vectors or density matrices. On `None` the states will be saved if no expectation operators are given. - - | normalize_output : bool - | Normalize output state to hide ODE numerical errors. - | progress_bar : str {'text', 'enhanced', 'tqdm', ''} | How to present the solver progress. 'tqdm' uses the python module of the same name and raise an error @@ -75,18 +73,18 @@ def mcsolve(H, state, tlist, c_ops=(), e_ops=None, ntraj=500, *, - | atol, rtol : float | Absolute and relative tolerance of the ODE integrator. - | nsteps : int - | Maximum number of (internally defined) steps allowed in one ``tlist`` - step. + | Maximum number of (internally defined) steps allowed in one + ``tlist`` step. - | max_step : float - | Maximum lenght of one internal step. 
When using pulses, it should be - less than half the width of the thinnest pulse. + | Maximum length of one internal step. When using pulses, it should + be less than half the width of the thinnest pulse. - | keep_runs_results : bool, [False] | Whether to store results from all trajectories or just store the averages. - - | map : str {"serial", "parallel", "loky"} - | How to run the trajectories. "parallel" uses concurent module to - run in parallel while "loky" use the module of the same name to do - so. + - | map : str {"serial", "parallel", "loky", "mpi"} + | How to run the trajectories. "parallel" uses the multiprocessing + module to run in parallel while "loky" and "mpi" use the "loky" and + "mpi4py" modules to do so. - | num_cpus : int | Number of cpus to use when running in parallel. ``None`` detect the number of available cpus. @@ -102,6 +100,12 @@ def mcsolve(H, state, tlist, c_ops=(), e_ops=None, ntraj=500, *, | Whether to use the improved sampling algorithm from Abdelhafez et al. PRA (2019) + Additional options are listed under + `options <./classes.html#qutip.solver.mcsolve.MCSolver.options>`__. + More options may be available depending on the selected + differential equation integration method, see + `Integrator <./classes.html#classes-ode>`_. + seeds : int, SeedSequence, list, optional Seed for the random number generator. It can be a single seed used to spawn seeds for each trajectory or a list of seeds, one for each @@ -167,6 +171,7 @@ class _MCSystem(_MTSystem): """ Container for the operators of the solver. """ + def __init__(self, rhs, c_ops, n_ops): self.rhs = rhs self.c_ops = c_ops @@ -247,8 +252,8 @@ def set_state(self, t, state0, generator, self.target_norm = 0.0 else: self.target_norm = ( - self._generator.random() * (1 - jump_prob_floor) - + jump_prob_floor + self._generator.random() * (1 - jump_prob_floor) + + jump_prob_floor ) self._integrator.set_state(t, state0) self._is_set = True @@ -412,10 +417,11 @@ class MCSolver(MultiTrajSolver): "store_final_state": False, "store_states": None, "keep_runs_results": False, - "method": "adams", "map": "serial", + "mpi_options": {}, "num_cpus": None, "bitgenerator": None, + "method": "adams", "mc_corr_eps": 1e-10, "norm_steps": 5, "norm_t_tol": 1e-6, @@ -581,24 +587,30 @@ def options(self): progress_bar: str {'text', 'enhanced', 'tqdm', ''}, default: "text" How to present the solver progress. - 'tqdm' uses the python module of the same name and raise an error if - not installed. Empty string or False will disable the bar. + 'tqdm' uses the python module of the same name and raise an error + if not installed. Empty string or False will disable the bar. progress_kwargs: dict, default: {"chunk_size":10} Arguments to pass to the progress_bar. Qutip's bars use ``chunk_size``. keep_runs_results: bool, default: False - Whether to store results from all trajectories or just store the - averages. + Whether to store results from all trajectories or just store the + averages. method: str, default: "adams" - Which ODE integrator methods are supported. - - map: str {"serial", "parallel", "loky"}, default: "serial" - How to run the trajectories. "parallel" uses concurent module to - run in parallel while "loky" use the module of the same name to do - so. + Which differential equation integration method to use. + + map: str {"serial", "parallel", "loky", "mpi"}, default: "serial" + How to run the trajectories. 
"parallel" uses the multiprocessing + module to run in parallel while "loky" and "mpi" use the "loky" and + "mpi4py" modules to do so. + + mpi_options: dict, default: {} + Only applies if map is "mpi". This dictionary will be passed as + keyword arguments to the `mpi4py.futures.MPIPoolExecutor` + constructor. Note that the `max_workers` argument is provided + separately through the `num_cpus` option. num_cpus: None, int Number of cpus to use when running in parallel. ``None`` detect the diff --git a/qutip/solver/multitraj.py b/qutip/solver/multitraj.py index 60f52a7202..3ca7930a96 100644 --- a/qutip/solver/multitraj.py +++ b/qutip/solver/multitraj.py @@ -64,6 +64,7 @@ class MultiTrajSolver(Solver): "normalize_output": False, "method": "", "map": "serial", + "mpi_options": {}, "num_cpus": None, "bitgenerator": None, } @@ -142,11 +143,11 @@ def _initialize_run(self, state, ntraj=1, args=None, e_ops=(), ) result.add_end_condition(ntraj, target_tol) - map_func = _get_map[self.options['map']] - map_kw = { + map_func, map_kw = _get_map(self.options) + map_kw.update({ 'timeout': timeout, 'num_cpus': self.options['num_cpus'], - } + }) state0 = self._prepare_state(state) stats['preparation time'] += time() - start_time return stats, seeds, result, map_func, map_kw, state0 diff --git a/qutip/solver/nm_mcsolve.py b/qutip/solver/nm_mcsolve.py index 690a86c1be..6c064bb975 100644 --- a/qutip/solver/nm_mcsolve.py +++ b/qutip/solver/nm_mcsolve.py @@ -83,8 +83,6 @@ def nm_mcsolve(H, state, tlist, ops_and_rates=(), e_ops=None, ntraj=500, *, | Whether or not to store the state vectors or density matrices. On `None` the states will be saved if no expectation operators are given. - - | normalize_output : bool - | Normalize output state to hide ODE numerical errors. - | progress_bar : str {'text', 'enhanced', 'tqdm', ''} | How to present the solver progress. 'tqdm' uses the python module of the same name and raise an error @@ -96,18 +94,18 @@ def nm_mcsolve(H, state, tlist, ops_and_rates=(), e_ops=None, ntraj=500, *, - | atol, rtol : float | Absolute and relative tolerance of the ODE integrator. - | nsteps : int - | Maximum number of (internally defined) steps allowed in one ``tlist`` - step. + | Maximum number of (internally defined) steps allowed in one + ``tlist`` step. - | max_step : float - | Maximum lenght of one internal step. When using pulses, it should be - less than half the width of the thinnest pulse. + | Maximum length of one internal step. When using pulses, it should + be less than half the width of the thinnest pulse. - | keep_runs_results : bool, [False] | Whether to store results from all trajectories or just store the averages. - - | map : str {"serial", "parallel", "loky"} - | How to run the trajectories. "parallel" uses concurent module to - run in parallel while "loky" use the module of the same name to do - so. + - | map : str {"serial", "parallel", "loky", "mpi"} + | How to run the trajectories. "parallel" uses the multiprocessing + module to run in parallel while "loky" and "mpi" use the "loky" and + "mpi4py" modules to do so. - | num_cpus : int | Number of cpus to use when running in parallel. ``None`` detect the number of available cpus. @@ -119,19 +117,21 @@ def nm_mcsolve(H, state, tlist, ops_and_rates=(), e_ops=None, ntraj=500, *, - | mc_corr_eps : float | Small number used to detect non-physical collapse caused by numerical imprecision. - - | improved_sampling : Bool - | Whether to use the improved sampling algorithm from Abdelhafez et - al. 
PRA (2019) - | completeness_rtol, completeness_atol : float, float | Parameters used in determining whether the given Lindblad operators satisfy a certain completeness relation. If they do not, an additional Lindblad operator is added automatically (with zero rate). - | martingale_quad_limit : float or int - An upper bound on the number of subintervals used in the adaptive + | An upper bound on the number of subintervals used in the adaptive integration of the martingale. Note that the 'improved_sampling' option is not currently supported. + Additional options are listed under `options + <./classes.html#qutip.solver.nm_mcsolve.NonMarkovianMCSolver.options>`__. + More options may be available depending on the selected + differential equation integration method, see + `Integrator <./classes.html#classes-ode>`_. seeds : int, SeedSequence, list, optional Seed for the random number generator. It can be a single seed used to @@ -325,12 +325,24 @@ class NonMarkovianMCSolver(MCSolver): name = "nm_mcsolve" _resultclass = NmmcResult solver_options = { - **MCSolver.solver_options, + "progress_bar": "text", + "progress_kwargs": {"chunk_size": 10}, + "store_final_state": False, + "store_states": None, + "keep_runs_results": False, + "map": "serial", + "mpi_options": {}, + "num_cpus": None, + "bitgenerator": None, + "method": "adams", + "mc_corr_eps": 1e-10, + "norm_steps": 5, + "norm_t_tol": 1e-6, + "norm_tol": 1e-4, "completeness_rtol": 1e-5, "completeness_atol": 1e-8, "martingale_quad_limit": 100, } - del solver_options["improved_sampling"] # both classes will be partially initialized in constructor _trajectory_resultclass = NmmcTrajectoryResult @@ -546,16 +558,22 @@ def options(self): ``chunk_size``. keep_runs_results: bool, default: False - Whether to store results from all trajectories or just store the - averages. + Whether to store results from all trajectories or just store the + averages. method: str, default: "adams" - Which ODE integrator methods are supported. - - map: str {"serial", "parallel", "loky"}, default: "serial" - How to run the trajectories. "parallel" uses concurent module to - run in parallel while "loky" use the module of the same name to do - so. + Which differential equation integration method to use. + + map: str {"serial", "parallel", "loky", "mpi"}, default: "serial" + How to run the trajectories. "parallel" uses the multiprocessing + module to run in parallel while "loky" and "mpi" use the "loky" and + "mpi4py" modules to do so. + + mpi_options: dict, default: {} + Only applies if map is "mpi". This dictionary will be passed as + keyword arguments to the `mpi4py.futures.MPIPoolExecutor` + constructor. Note that the `max_workers` argument is provided + separately through the `num_cpus` option. num_cpus: None, int Number of cpus to use when running in parallel. ``None`` detect the diff --git a/qutip/solver/parallel.py b/qutip/solver/parallel.py index 880365a30d..425a317f21 100644 --- a/qutip/solver/parallel.py +++ b/qutip/solver/parallel.py @@ -3,7 +3,7 @@ mappings, using the builtin Python module multiprocessing or the loky parallel execution library. 
""" -__all__ = ['parallel_map', 'serial_map', 'loky_pmap'] +__all__ = ['parallel_map', 'serial_map', 'loky_pmap', 'mpi_pmap'] import multiprocessing import os @@ -11,6 +11,7 @@ import time import threading import concurrent.futures +import warnings from qutip.ui.progressbar import progress_bars from qutip.settings import available_cpu_count @@ -131,57 +132,33 @@ def serial_map(task, values, task_args=None, task_kwargs=None, return results -def parallel_map(task, values, task_args=None, task_kwargs=None, - reduce_func=None, map_kw=None, - progress_bar=None, progress_bar_kwargs={}): +def _generic_pmap(task, values, task_args, task_kwargs, reduce_func, + timeout, fail_fast, num_workers, + progress_bar, progress_bar_kwargs, + setup_executor, extract_result, shutdown_executor): """ - Parallel execution of a mapping of ``values`` to the function ``task``. - This is functionally equivalent to:: - - result = [task(value, *task_args, **task_kwargs) for value in values] + Common functionality for parallel_map, loky_pmap and mpi_pmap. + The parameters `setup_executor`, `extract_result` and `shutdown_executor` + are callback functions with the following signatures: - Parameters - ---------- - task : a Python function - The function that is to be called for each value in ``task_vec``. - values : array / list - The list or array of values for which the ``task`` function is to be - evaluated. - task_args : list, optional - The optional additional arguments to the ``task`` function. - task_kwargs : dictionary, optional - The optional additional keyword arguments to the ``task`` function. - reduce_func : func, optional - If provided, it will be called with the output of each task instead of - storing them in a list. Note that the order in which results are - passed to ``reduce_func`` is not defined. It should return None or a - number. When returning a number, it represents the estimation of the - number of tasks left. On a return <= 0, the map will end early. - progress_bar : str, optional - Progress bar options's string for showing progress. - progress_bar_kwargs : dict, optional - Options for the progress bar. - map_kw: dict, optional - Dictionary containing entry for: - - timeout: float, Maximum time (sec) for the whole map. - - num_cpus: int, Number of jobs to run at once. - - fail_fast: bool, Abort at the first error. + setup_executor: () -> ProcessPoolExecutor - Returns - ------- - result : list - The result list contains the value of - ``task(value, *task_args, **task_kwargs)`` for - each value in ``values``. If a ``reduce_func`` is provided, and empty - list will be returned. + extract_result: Future -> (Any, BaseException) + If there was an exception e, returns (None, e). + Otherwise returns (result, None). 
+ shutdown_executor: (executor: ProcessPoolExecutor, + active_tasks: set[Future]) -> None + executor: The ProcessPoolExecutor that was created in setup_executor + active_tasks: A set of Futures that are currently still being executed + (non-empty if: timeout, error, or reduce_func requesting exit) """ + if task_args is None: task_args = () if task_kwargs is None: task_kwargs = {} - map_kw = _read_map_kw(map_kw) - end_time = map_kw['timeout'] + time.time() + end_time = timeout + time.time() progress_bar = progress_bars[progress_bar]( len(values), **progress_bar_kwargs @@ -191,6 +168,7 @@ def parallel_map(task, values, task_args=None, task_kwargs=None, finished = [] if reduce_func is not None: results = None + def result_func(_, value): return reduce_func(value) else: @@ -199,42 +177,38 @@ def result_func(_, value): def _done_callback(future): if not future.cancelled(): - ex = future.exception() - if isinstance(ex, KeyboardInterrupt): + result, exception = extract_result(future) + if isinstance(exception, KeyboardInterrupt): # When a keyboard interrupt happens, it is raised in the main # thread and in all worker threads. At this point in the code, # the worker threads have already returned and the main thread # is only waiting for the ProcessPoolExecutor to shutdown # before exiting. We therefore return immediately. return - if isinstance(ex, Exception): - errors[future._i] = ex + if exception is not None: + if isinstance(exception, Exception): + errors[future._i] = exception + else: + raise exception else: - result = future.result() remaining_ntraj = result_func(future._i, result) if remaining_ntraj is not None and remaining_ntraj <= 0: finished.append(True) progress_bar.update() - if sys.version_info >= (3, 7): - # ProcessPoolExecutor only supports mp_context from 3.7 onwards - ctx_kw = {"mp_context": mp_context} - else: - ctx_kw = {} - os.environ['QUTIP_IN_PARALLEL'] = 'TRUE' try: - with concurrent.futures.ProcessPoolExecutor( - max_workers=map_kw['num_cpus'], **ctx_kw, - ) as executor: + with setup_executor() as executor: waiting = set() i = 0 + aborted = False + while i < len(values): # feed values to the executor, ensuring that there is at # most one task per worker at any moment in time so that # we can shutdown without waiting for greater than the time # taken by the longest task - if len(waiting) >= map_kw['num_cpus']: + if len(waiting) >= num_workers: # no space left, wait for a task to complete or # the time to run out timeout = max(0, end_time - time.time()) @@ -245,12 +219,13 @@ def _done_callback(future): ) if ( time.time() >= end_time - or (errors and map_kw['fail_fast']) + or (errors and fail_fast) or finished ): # no time left, exit the loop + aborted = True break - while len(waiting) < map_kw['num_cpus'] and i < len(values): + while len(waiting) < num_workers and i < len(values): # space and time available, add tasks value = values[i] future = executor.submit( @@ -264,13 +239,21 @@ def _done_callback(future): waiting.add(future) i += 1 - timeout = max(0, end_time - time.time()) - concurrent.futures.wait(waiting, timeout=timeout) + if not aborted: + # all tasks have been submitted, timeout has not been reaches + # -> wait for all workers to finish before shutting down + timeout = max(0, end_time - time.time()) + _done, waiting = concurrent.futures.wait( + waiting, + timeout=timeout, + return_when=concurrent.futures.ALL_COMPLETED + ) + shutdown_executor(executor, waiting) finally: os.environ['QUTIP_IN_PARALLEL'] = 'FALSE' progress_bar.finished() - if errors and 
map_kw["fail_fast"]: + if errors and fail_fast: raise list(errors.values())[0] elif errors: raise MapExceptions( @@ -281,6 +264,83 @@ def _done_callback(future): return results +def parallel_map(task, values, task_args=None, task_kwargs=None, + reduce_func=None, map_kw=None, + progress_bar=None, progress_bar_kwargs={}): + """ + Parallel execution of a mapping of ``values`` to the function ``task``. + This is functionally equivalent to:: + + result = [task(value, *task_args, **task_kwargs) for value in values] + + Parameters + ---------- + task : a Python function + The function that is to be called for each value in ``task_vec``. + values : array / list + The list or array of values for which the ``task`` function is to be + evaluated. + task_args : list, optional + The optional additional arguments to the ``task`` function. + task_kwargs : dictionary, optional + The optional additional keyword arguments to the ``task`` function. + reduce_func : func, optional + If provided, it will be called with the output of each task instead of + storing them in a list. Note that the order in which results are + passed to ``reduce_func`` is not defined. It should return None or a + number. When returning a number, it represents the estimation of the + number of tasks left. On a return <= 0, the map will end early. + progress_bar : str, optional + Progress bar options's string for showing progress. + progress_bar_kwargs : dict, optional + Options for the progress bar. + map_kw: dict, optional + Dictionary containing entry for: + - timeout: float, Maximum time (sec) for the whole map. + - num_cpus: int, Number of jobs to run at once. + - fail_fast: bool, Abort at the first error. + + Returns + ------- + result : list + The result list contains the value of + ``task(value, *task_args, **task_kwargs)`` for + each value in ``values``. If a ``reduce_func`` is provided, and empty + list will be returned. + + """ + + map_kw = _read_map_kw(map_kw) + if sys.version_info >= (3, 7): + # ProcessPoolExecutor only supports mp_context from 3.7 onwards + ctx_kw = {"mp_context": mp_context} + else: + ctx_kw = {} + + def setup_executor(): + return concurrent.futures.ProcessPoolExecutor( + max_workers=map_kw['num_cpus'], **ctx_kw, + ) + + def extract_result(future: concurrent.futures.Future): + exception = future.exception() + if exception is not None: + return None, exception + return future.result(), None + + def shutdown_executor(executor, _): + # Since `ProcessPoolExecutor` leaves no other option, + # we wait for all worker processes to finish their current task + executor.shutdown() + + return _generic_pmap( + task, values, task_args, task_kwargs, reduce_func, + map_kw['timeout'], map_kw['fail_fast'], map_kw['num_cpus'], + progress_bar, progress_bar_kwargs, + setup_executor, extract_result, shutdown_executor + ) + + def loky_pmap(task, values, task_args=None, task_kwargs=None, reduce_func=None, map_kw=None, progress_bar=None, progress_bar_kwargs={}): @@ -305,8 +365,8 @@ def loky_pmap(task, values, task_args=None, task_kwargs=None, The optional additional keyword arguments to the ``task`` function. reduce_func : func, optional If provided, it will be called with the output of each task instead of - storing them in a list. Note that the results are passed to - ``reduce_func`` in the original order. It should return None or a + storing them in a list. Note that the order in which results are + passed to ``reduce_func`` is not defined. It should return None or a number. 
When returning a number, it represents the estimation of the number of tasks left. On a return <= 0, the map will end early. progress_bar : str, optional @@ -328,74 +388,156 @@ def loky_pmap(task, values, task_args=None, task_kwargs=None, list will be returned. """ - if task_args is None: - task_args = () - if task_kwargs is None: - task_kwargs = {} + + from loky import get_reusable_executor + from loky.process_executor import ShutdownExecutorError map_kw = _read_map_kw(map_kw) - end_time = map_kw['timeout'] + time.time() - progress_bar = progress_bars[progress_bar]( - len(values), **progress_bar_kwargs + def setup_executor(): + return get_reusable_executor(max_workers=map_kw['num_cpus']) + + def extract_result(future: concurrent.futures.Future): + exception = future.exception() + if isinstance(exception, ShutdownExecutorError): + # Task was aborted due to timeout etc + return None, None + if exception is not None: + return None, exception + return future.result(), None + + def shutdown_executor(executor, active_tasks): + # If there are still tasks running, we kill all workers in order to + # return immediately. Otherwise, `kill_workers` is set to False so + # that the worker threads can be reused in subsequent loky_pmap calls. + kill_workers = len(active_tasks) > 0 + executor.shutdown(kill_workers=kill_workers) + + return _generic_pmap( + task, values, task_args, task_kwargs, reduce_func, + map_kw['timeout'], map_kw['fail_fast'], map_kw['num_cpus'], + progress_bar, progress_bar_kwargs, + setup_executor, extract_result, shutdown_executor ) - errors = {} - remaining_ntraj = None - if reduce_func is None: - results = [None] * len(values) - else: - results = None - os.environ['QUTIP_IN_PARALLEL'] = 'TRUE' - from loky import get_reusable_executor, TimeoutError - try: - executor = get_reusable_executor(max_workers=map_kw['num_cpus']) - - jobs = [executor.submit(task, value, *task_args, **task_kwargs) - for value in values] - - for n, job in enumerate(jobs): - remaining_time = max(end_time - time.time(), 0) - try: - result = job.result(remaining_time) - except TimeoutError: - [job.cancel() for job in jobs] - break - except Exception as err: - if map_kw["fail_fast"]: - raise err - else: - errors[n] = err - else: - if reduce_func is not None: - remaining_ntraj = reduce_func(result) - else: - results[n] = result - progress_bar.update() - if remaining_ntraj is not None and remaining_ntraj <= 0: - break +def mpi_pmap(task, values, task_args=None, task_kwargs=None, + reduce_func=None, map_kw=None, + progress_bar=None, progress_bar_kwargs={}): + """ + Parallel execution of a mapping of ``values`` to the function ``task``. + This is functionally equivalent to:: - except KeyboardInterrupt as e: - [job.cancel() for job in jobs] - raise e + result = [task(value, *task_args, **task_kwargs) for value in values] - finally: - os.environ['QUTIP_IN_PARALLEL'] = 'FALSE' - executor.shutdown(kill_workers=True) + Uses the mpi4py module to execute the tasks asynchronously with MPI + processes. For more information, consult the documentation of mpi4py and + the mpi4py.MPIPoolExecutor class. - progress_bar.finished() - if errors: - raise MapExceptions( - f"{len(errors)} iterations failed in loky_pmap", - errors, results - ) - return results + Note: in keeping consistent with the API of `parallel_map`, the parameter + determining the number of requested worker processes is called `num_cpus`. + The value of `map_kw['num_cpus']` is passed to the MPIPoolExecutor as its + `max_workers` argument. 
+ If this parameter is not provided, the environment variable + `QUTIP_NUM_PROCESSES` is used instead. If this environment variable is not + set either, QuTiP will use default values that might be unsuitable for MPI + applications. + + Parameters + ---------- + task : a Python function + The function that is to be called for each value in ``values``. + values : array / list + The list or array of values for which the ``task`` function is to be + evaluated. + task_args : list, optional + The optional additional arguments to the ``task`` function. + task_kwargs : dictionary, optional + The optional additional keyword arguments to the ``task`` function. + reduce_func : func, optional + If provided, it will be called with the output of each task instead of + storing them in a list. Note that the order in which results are + passed to ``reduce_func`` is not defined. It should return None or a + number. When returning a number, it represents the estimation of the + number of tasks left. On a return <= 0, the map will end early. + progress_bar : str, optional + Progress bar option's string for showing progress. + progress_bar_kwargs : dict, optional + Options for the progress bar. + map_kw: dict, optional + Dictionary containing entries for: + - timeout: float, Maximum time (sec) for the whole map. + - num_cpus: int, Number of jobs to run at once. + - fail_fast: bool, Abort at the first error. + All remaining entries of map_kw will be passed to the + mpi4py.MPIPoolExecutor constructor. + + Returns + ------- + result : list + The result list contains the value of + ``task(value, *task_args, **task_kwargs)`` for + each value in ``values``. If a ``reduce_func`` is provided, an empty + list will be returned. + """ + + from mpi4py.futures import MPIPoolExecutor + + # If the provided num_cpus is None, we use the default value instead. + # We thus intentionally make it impossible to call + # MPIPoolExecutor(max_workers=None, ...) + # in which case mpi4py would determine a default value. That would be + # useful, but unfortunately mpi4py provides no public API to access the + # actual number of workers that is used in that case, which we would need.
+ worker_number_provided = ( + ((map_kw is not None) and ('num_cpus' in map_kw)) + or 'QUTIP_NUM_PROCESSES' in os.environ) + + map_kw = _read_map_kw(map_kw) + timeout = map_kw.pop('timeout') + num_workers = map_kw.pop('num_cpus') + fail_fast = map_kw.pop('fail_fast') + + if not worker_number_provided: + warnings.warn(f'mpi_pmap was called without specifying the number of ' + f'worker processes, using the default {num_workers}') + + def setup_executor(): + return MPIPoolExecutor(max_workers=num_workers, **map_kw) + + def extract_result(future): + exception = future.exception() + if exception is not None: + return None, exception + return future.result(), None + + def shutdown_executor(executor, _): + executor.shutdown() + + return _generic_pmap( + task, values, task_args, task_kwargs, reduce_func, + timeout, fail_fast, num_workers, + progress_bar, progress_bar_kwargs, + setup_executor, extract_result, shutdown_executor + ) -_get_map = { + +_maps = { "parallel_map": parallel_map, "parallel": parallel_map, "serial_map": serial_map, "serial": serial_map, "loky": loky_pmap, + "mpi": mpi_pmap } + + +def _get_map(options): + map_func = _maps[options['map']] + + if map_func == mpi_pmap: + map_kw = options['mpi_options'] + else: + map_kw = {} + + return map_func, map_kw diff --git a/qutip/solver/result.py b/qutip/solver/result.py index b1d1bf2bbd..2f32dddbc9 100644 --- a/qutip/solver/result.py +++ b/qutip/solver/result.py @@ -1,9 +1,17 @@ """ Class for solve function results""" + +from typing import TypedDict import numpy as np from ..core import Qobj, QobjEvo, expect, isket, ket2dm, qzero_like -__all__ = ["Result", "MultiTrajResult", "McResult", "NmmcResult", - "McTrajectoryResult", "McResultImprovedSampling"] +__all__ = [ + "Result", + "MultiTrajResult", + "McResult", + "NmmcResult", + "McTrajectoryResult", + "McResultImprovedSampling", +] class _QobjExpectEop: @@ -16,6 +24,7 @@ class _QobjExpectEop: op : :obj:`.Qobj` The expectation value operator. """ + def __init__(self, op): self.op = op @@ -46,6 +55,7 @@ class ExpectOp: op : object The original object used to define the e_op operation. """ + def __init__(self, op, f, append): self.op = op self._f = f @@ -70,6 +80,7 @@ class _BaseResult: """ Common method for all ``Result``. """ + def __init__(self, options, *, solver=None, stats=None): self.solver = solver if stats is None: @@ -81,12 +92,12 @@ def __init__(self, options, *, solver=None, stats=None): # make sure not to store a reference to the solver options_copy = options.copy() - if hasattr(options_copy, '_feedback'): + if hasattr(options_copy, "_feedback"): options_copy._feedback = None self.options = options_copy def _e_ops_to_dict(self, e_ops): - """ Convert the supplied e_ops to a dictionary of Eop instances. """ + """Convert the supplied e_ops to a dictionary of Eop instances.""" if e_ops is None: e_ops = {} elif isinstance(e_ops, (list, tuple)): @@ -118,6 +129,11 @@ def add_processor(self, f, requires_copy=False): self._state_processors_require_copy |= requires_copy +class ResultOptions(TypedDict): + store_states: bool + store_final_state: bool + + class Result(_BaseResult): """ Base class for storing solver results. @@ -199,7 +215,18 @@ class Result(_BaseResult): options : dict The options for this result class. 
""" - def __init__(self, e_ops, options, *, solver=None, stats=None, **kw): + + options: ResultOptions + + def __init__( + self, + e_ops, + options: ResultOptions, + *, + solver=None, + stats=None, + **kw, + ): super().__init__(options, solver=solver, stats=stats) raw_ops = self._e_ops_to_dict(e_ops) self.e_data = {k: [] for k in raw_ops} @@ -211,7 +238,7 @@ def __init__(self, e_ops, options, *, solver=None, stats=None, **kw): self.times = [] self.states = [] - self.final_state = None + self._final_state = None self._post_init(**kw) @@ -243,29 +270,29 @@ def _post_init(self): Sub-class ``.post_init()`` implementation may take additional keyword arguments if required. """ - store_states = self.options['store_states'] - store_final_state = self.options['store_final_state'] - - if store_states is None: - store_states = len(self.e_ops) == 0 + store_states = self.options["store_states"] + store_states = store_states or ( + len(self.e_ops) == 0 and store_states is None + ) if store_states: self.add_processor(self._store_state, requires_copy=True) - if store_states or store_final_state: + store_final_state = self.options["store_final_state"] + if store_final_state and not store_states: self.add_processor(self._store_final_state, requires_copy=True) def _store_state(self, t, state): - """ Processor that stores a state in ``.states``. """ + """Processor that stores a state in ``.states``.""" self.states.append(state) def _store_final_state(self, t, state): - """ Processor that writes the state to ``.final_state``. """ - self.final_state = state + """Processor that writes the state to ``._final_state``.""" + self._final_state = state def _pre_copy(self, state): - """ Return a copy of the state. Sub-classes may override this to - copy a state in different manner or to skip making a copy - altogether if a copy is not necessary. + """Return a copy of the state. Sub-classes may override this to + copy a state in different manner or to skip making a copy + altogether if a copy is not necessary. """ return state.copy() @@ -311,10 +338,7 @@ def __repr__(self): ] if self.stats: lines.append(" Solver stats:") - lines.extend( - f" {k}: {v!r}" - for k, v in self.stats.items() - ) + lines.extend(f" {k}: {v!r}" for k, v in self.stats.items()) if self.times: lines.append( f" Time interval: [{self.times[0]}, {self.times[-1]}]" @@ -334,6 +358,20 @@ def __repr__(self): def expect(self): return [np.array(e_op) for e_op in self.e_data.values()] + @property + def final_state(self): + if self._final_state is not None: + return self._final_state + if self.states: + return self.states[-1] + return None + + +class MultiTrajResultOptions(TypedDict): + store_states: bool + store_final_state: bool + keep_runs_results: bool + class MultiTrajResult(_BaseResult): """ @@ -455,7 +493,18 @@ class MultiTrajResult(_BaseResult): options : :obj:`~SolverResultsOptions` The options for this result class. 
""" - def __init__(self, e_ops, options, *, solver=None, stats=None, **kw): + + options: MultiTrajResultOptions + + def __init__( + self, + e_ops, + options: MultiTrajResultOptions, + *, + solver=None, + stats=None, + **kw, + ): super().__init__(options, solver=solver, stats=stats) self._raw_ops = self._e_ops_to_dict(e_ops) @@ -471,15 +520,29 @@ def __init__(self, e_ops, options, *, solver=None, stats=None, **kw): self._target_tols = None self.average_e_data = {} - self.e_data = {} self.std_e_data = {} self.runs_e_data = {} self._post_init(**kw) + @property + def _store_average_density_matricies(self) -> bool: + return ( + self.options["store_states"] + or (self.options["store_states"] is None and self._raw_ops == {}) + ) and not self.options["keep_runs_results"] + + @property + def _store_final_density_matrix(self) -> bool: + return ( + self.options["store_final_state"] + and not self._store_average_density_matricies + and not self.options["keep_runs_results"] + ) + @staticmethod def _to_dm(state): - if state.type == 'ket': + if state.type == "ket": state = state.proj() return state @@ -489,10 +552,12 @@ def _add_first_traj(self, trajectory): """ self.times = trajectory.times - if trajectory.states: - self._sum_states = [qzero_like(self._to_dm(state)) - for state in trajectory.states] - if trajectory.final_state: + if trajectory.states and self._store_average_density_matricies: + self._sum_states = [ + qzero_like(self._to_dm(state)) for state in trajectory.states + ] + + if trajectory.final_state and self._store_final_density_matrix: state = trajectory.final_state self._sum_final_states = qzero_like(self._to_dm(state)) @@ -507,13 +572,10 @@ def _add_first_traj(self, trajectory): self.average_e_data = { k: list(avg_expect) - for k, avg_expect - in zip(self._raw_ops, self._sum_expect) + for k, avg_expect in zip(self._raw_ops, self._sum_expect) } - self.e_data = self.average_e_data - if self.options['keep_runs_results']: + if self.options["keep_runs_results"]: self.runs_e_data = {k: [] for k in self._raw_ops} - self.e_data = self.runs_e_data def _store_trajectory(self, trajectory): self.trajectories.append(trajectory) @@ -521,8 +583,7 @@ def _store_trajectory(self, trajectory): def _reduce_states(self, trajectory): self._sum_states = [ accu + self._to_dm(state) - for accu, state - in zip(self._sum_states, trajectory.states) + for accu, state in zip(self._sum_states, trajectory.states) ] def _reduce_final_state(self, trajectory): @@ -569,7 +630,7 @@ def _fixed_end(self): """ ntraj_left = self._target_ntraj - self.num_trajectories if ntraj_left == 0: - self.stats['end_condition'] = 'ntraj reached' + self.stats["end_condition"] = "ntraj reached" return ntraj_left def _average_computer(self): @@ -586,40 +647,36 @@ def _target_tolerance_end(self): if self.num_trajectories <= 1: return np.inf avg, avg2 = self._average_computer() - target = np.array([ - atol + rtol * mean - for mean, (atol, rtol) - in zip(avg, self._target_tols) - ]) + target = np.array( + [ + atol + rtol * mean + for mean, (atol, rtol) in zip(avg, self._target_tols) + ] + ) target_ntraj = np.max((avg2 - abs(avg) ** 2) / target**2 + 1) self._estimated_ntraj = min(target_ntraj, self._target_ntraj) if (self._estimated_ntraj - self.num_trajectories) <= 0: - self.stats['end_condition'] = 'target tolerance reached' + self.stats["end_condition"] = "target tolerance reached" return self._estimated_ntraj - self.num_trajectories def _post_init(self): self.num_trajectories = 0 self._target_ntraj = None - store_states = 
self.options['store_states'] - store_final_state = self.options['store_final_state'] - store_traj = self.options['keep_runs_results'] - self.add_processor(self._increment_traj) - if store_traj: + store_trajectory = self.options["keep_runs_results"] + if store_trajectory: self.add_processor(self._store_trajectory) - if store_states is None: - store_states = len(self._raw_ops) == 0 - if store_states: + if self._store_average_density_matricies: self.add_processor(self._reduce_states) - if store_states or store_final_state: + if self._store_final_density_matrix: self.add_processor(self._reduce_final_state) if self._raw_ops: self.add_processor(self._reduce_expect) self._early_finish_check = self._no_end - self.stats['end_condition'] = 'unknown' + self.stats["end_condition"] = "unknown" def add(self, trajectory_info): """ @@ -676,7 +733,7 @@ def add_end_condition(self, ntraj, target_tol=None): Error estimation is done with jackknife resampling. """ self._target_ntraj = ntraj - self.stats['end_condition'] = 'timeout' + self.stats["end_condition"] = "timeout" if target_tol is None: self._early_finish_check = self._fixed_end @@ -691,14 +748,16 @@ def add_end_condition(self, ntraj, target_tol=None): targets = np.array(target_tol) if targets.ndim == 0: - self._target_tols = np.array([(target_tol, 0.)] * num_e_ops) + self._target_tols = np.array([(target_tol, 0.0)] * num_e_ops) elif targets.shape == (2,): self._target_tols = np.ones((num_e_ops, 2)) * targets elif targets.shape == (num_e_ops, 2): self._target_tols = targets else: - raise ValueError("target_tol must be a number, a pair of (atol, " - "rtol) or a list of (atol, rtol) for each e_ops") + raise ValueError( + "target_tol must be a number, a pair of (atol, " + "rtol) or a list of (atol, rtol) for each e_ops" + ) self._early_finish_check = self._target_tolerance_end @@ -718,8 +777,18 @@ def average_states(self): States averages as density matrices. """ if self._sum_states is None: - return None - return [final / self.num_trajectories for final in self._sum_states] + if not (self.trajectories and self.trajectories[0].states): + return None + self._sum_states = [ + qzero_like(self._to_dm(state)) + for state in self.trajectories[0].states + ] + for trajectory in self.trajectories: + self._reduce_states(trajectory) + + return [ + final / self.num_trajectories for final in self._sum_states + ] @property def states(self): @@ -744,6 +813,8 @@ def average_final_state(self): Last states of each trajectories averaged into a density matrix. 
""" if self._sum_final_states is None: + if self.average_states is not None: + return self.average_states[-1] return None return self._sum_final_states / self.num_trajectories @@ -770,6 +841,10 @@ def runs_expect(self): def expect(self): return [np.array(val) for val in self.e_data.values()] + @property + def e_data(self): + return self.runs_e_data or self.average_e_data + def steady_state(self, N=0): """ Average the states of the last ``N`` times of every runs as a density @@ -796,16 +871,13 @@ def __repr__(self): ] if self.stats: lines.append(" Solver stats:") - lines.extend( - f" {k}: {v!r}" - for k, v in self.stats.items() - ) + lines.extend(f" {k}: {v!r}" for k, v in self.stats.items()) if self.times: lines.append( f" Time interval: [{self.times[0]}, {self.times[-1]}]" f" ({len(self.times)} steps)" ) - lines.append(f" Number of e_ops: {len(self.e_ops)}") + lines.append(f" Number of e_ops: {len(self.e_data)}") if self.states: lines.append(" States saved.") elif self.final_state is not None: @@ -827,23 +899,35 @@ def __add__(self, other): raise ValueError("Shared `e_ops` is required to merge results") if self.times != other.times: raise ValueError("Shared `times` are is required to merge results") - new = self.__class__(self._raw_ops, self.options, - solver=self.solver, stats=self.stats) + + new = self.__class__( + self._raw_ops, self.options, solver=self.solver, stats=self.stats + ) + new.e_ops = self.e_ops + if self.trajectories and other.trajectories: new.trajectories = self.trajectories + other.trajectories new.num_trajectories = self.num_trajectories + other.num_trajectories new.times = self.times new.seeds = self.seeds + other.seeds - if self._sum_states is not None and other._sum_states is not None: - new._sum_states = self._sum_states + other._sum_states + if ( + self._sum_states is not None + and other._sum_states is not None + ): + new._sum_states = [ + state1 + state2 for state1, state2 in zip( + self._sum_states, other._sum_states + ) + ] if ( self._sum_final_states is not None and other._sum_final_states is not None ): new._sum_final_states = ( - self._sum_final_states + other._sum_final_states + self._sum_final_states + + other._sum_final_states ) new._target_tols = None @@ -854,23 +938,21 @@ def __add__(self, other): for i, k in enumerate(self._raw_ops): new._sum_expect.append(self._sum_expect[i] + other._sum_expect[i]) - new._sum2_expect.append(self._sum2_expect[i] - + other._sum2_expect[i]) + new._sum2_expect.append( + self._sum2_expect[i] + other._sum2_expect[i] + ) avg = new._sum_expect[i] / new.num_trajectories avg2 = new._sum2_expect[i] / new.num_trajectories new.average_e_data[k] = list(avg) - new.e_data = new.average_e_data - new.std_e_data[k] = np.sqrt(np.abs(avg2 - np.abs(avg**2))) - if new.trajectories: + if self.runs_e_data and other.runs_e_data: new.runs_e_data[k] = self.runs_e_data[k] + other.runs_e_data[k] - new.e_data = new.runs_e_data new.stats["run time"] += other.stats["run time"] - new.stats['end_condition'] = "Merged results" + new.stats["end_condition"] = "Merged results" return new @@ -881,8 +963,9 @@ class McTrajectoryResult(Result): """ def __init__(self, e_ops, options, *args, **kwargs): - super().__init__(e_ops, {**options, "normalize_output": False}, - *args, **kwargs) + super().__init__( + e_ops, {**options, "normalize_output": False}, *args, **kwargs + ) class McResult(MultiTrajResult): @@ -922,6 +1005,7 @@ class McResult(MultiTrajResult): For each runs, a list of every collapse as a tuple of the time it happened and the corresponding 
``c_ops`` index. """ + # Collapse are only produced by mcsolve. def _add_collapse(self, trajectory): @@ -941,7 +1025,7 @@ def col_times(self): out = [] for col_ in self.collapse: col = list(zip(*col_)) - col = ([] if len(col) == 0 else col[0]) + col = [] if len(col) == 0 else col[0] out.append(col) return out @@ -953,7 +1037,7 @@ def col_which(self): out = [] for col_ in self.collapse: col = list(zip(*col_)) - col = ([] if len(col) == 0 else col[1]) + col = [] if len(col) == 0 else col[1] out.append(col) return out @@ -968,7 +1052,9 @@ def photocurrent(self): for t, which in collapses: cols[which].append(t) mesurement = [ - np.histogram(cols[i], tlist)[0] / np.diff(tlist) / self.num_trajectories + np.histogram(cols[i], tlist)[0] + / np.diff(tlist) + / self.num_trajectories for i in range(self.num_c_ops) ] return mesurement @@ -984,10 +1070,12 @@ def runs_photocurrent(self): cols = [[] for _ in range(self.num_c_ops)] for t, which in collapses: cols[which].append(t) - measurements.append([ - np.histogram(cols[i], tlist)[0] / np.diff(tlist) - for i in range(self.num_c_ops) - ]) + measurements.append( + [ + np.histogram(cols[i], tlist)[0] / np.diff(tlist) + for i in range(self.num_c_ops) + ] + ) return measurements @@ -998,6 +1086,7 @@ class McResultImprovedSampling(McResult, MultiTrajResult): using the improved sampling algorithm, which samples the no-jump trajectory first and then only samples jump trajectories afterwards. """ + def __init__(self, e_ops, options, **kw): MultiTrajResult.__init__(self, e_ops=e_ops, options=options, **kw) self._sum_expect_no_jump = None @@ -1016,14 +1105,16 @@ def _reduce_states(self, trajectory): if self.num_trajectories == 1: self._sum_states_no_jump = [ accu + self._to_dm(state) - for accu, state - in zip(self._sum_states_no_jump, trajectory.states) + for accu, state in zip( + self._sum_states_no_jump, trajectory.states + ) ] else: self._sum_states_jump = [ accu + self._to_dm(state) - for accu, state - in zip(self._sum_states_jump, trajectory.states) + for accu, state in zip( + self._sum_states_jump, trajectory.states + ) ] def _reduce_final_state(self, trajectory): @@ -1040,13 +1131,15 @@ def _average_computer(self): def _add_first_traj(self, trajectory): super()._add_first_traj(trajectory) - if trajectory.states: + if trajectory.states and self._store_average_density_matricies: del self._sum_states - self._sum_states_no_jump = [qzero_like(self._to_dm(state)) - for state in trajectory.states] - self._sum_states_jump = [qzero_like(self._to_dm(state)) - for state in trajectory.states] - if trajectory.final_state: + self._sum_states_no_jump = [ + qzero_like(self._to_dm(state)) for state in trajectory.states + ] + self._sum_states_jump = [ + qzero_like(self._to_dm(state)) for state in trajectory.states + ] + if trajectory.final_state and self._store_final_density_matrix: state = trajectory.final_state del self._sum_final_states self._sum_final_states_no_jump = qzero_like(self._to_dm(state)) @@ -1063,10 +1156,12 @@ def _add_first_traj(self, trajectory): self._sum2_expect_no_jump = [ np.zeros_like(expect) for expect in trajectory.expect ] - self._sum_expect_jump = [np.zeros_like(expect) - for expect in trajectory.expect] - self._sum2_expect_jump = [np.zeros_like(expect) - for expect in trajectory.expect] + self._sum_expect_jump = [ + np.zeros_like(expect) for expect in trajectory.expect + ] + self._sum2_expect_jump = [ + np.zeros_like(expect) for expect in trajectory.expect + ] del self._sum_expect del self._sum2_expect @@ -1088,12 +1183,12 @@ def 
_reduce_expect(self, trajectory): else: self._sum_expect_jump[i] += expect_traj * (1 - p) self._sum2_expect_jump[i] += expect_traj**2 * (1 - p) - avg = (self._sum_expect_no_jump[i] - + self._sum_expect_jump[i] - / (self.num_trajectories - 1)) - avg2 = (self._sum2_expect_no_jump[i] - + self._sum2_expect_jump[i] - / (self.num_trajectories - 1)) + avg = self._sum_expect_no_jump[i] + ( + self._sum_expect_jump[i] / (self.num_trajectories - 1) + ) + avg2 = self._sum2_expect_no_jump[i] + ( + self._sum2_expect_jump[i] / (self.num_trajectories - 1) + ) self.average_e_data[k] = list(avg) @@ -1110,12 +1205,28 @@ def average_states(self): States averages as density matrices. """ if self._sum_states_no_jump is None: - return None + if not (self.trajectories and self.trajectories[0].states): + return None + self._sum_states_no_jump = [ + qzero_like(self._to_dm(state)) + for state in self.trajectories[0].states + ] + self._sum_states_jump = [ + qzero_like(self._to_dm(state)) + for state in self.trajectories[0].states + ] + self.num_trajectories = 0 + for trajectory in self.trajectories: + self.num_trajectories += 1 + self._reduce_states(trajectory) p = self.no_jump_prob - return [p * final_no_jump - + (1 - p) * final_jump / (self.num_trajectories - 1) - for final_no_jump, final_jump in - zip(self._sum_states_no_jump, self._sum_states_jump)] + return [ + p * final_no_jump + + (1 - p) * final_jump / (self.num_trajectories - 1) + for final_no_jump, final_jump in zip( + self._sum_states_no_jump, self._sum_states_jump + ) + ] @property def average_final_state(self): @@ -1123,11 +1234,11 @@ def average_final_state(self): Last states of each trajectory averaged into a density matrix. """ if self._sum_final_states_no_jump is None: - return None + if self.average_states is not None: + return self.average_states[-1] p = self.no_jump_prob - return ( - p * self._sum_final_states_no_jump - + (1 - p) * self._sum_final_states_jump + return p * self._sum_final_states_no_jump + ( + ((1 - p) * self._sum_final_states_jump) / (self.num_trajectories - 1) ) @@ -1145,8 +1256,10 @@ def photocurrent(self): for t, which in collapses: cols[which].append(t) mesurement = [ - (1 - self.no_jump_prob) / (self.num_trajectories - 1) * - np.histogram(cols[i], tlist)[0] / np.diff(tlist) + (1 - self.no_jump_prob) + / (self.num_trajectories - 1) + * np.histogram(cols[i], tlist)[0] + / np.diff(tlist) for i in range(self.num_c_ops) ] return mesurement @@ -1241,15 +1354,15 @@ def _add_first_traj(self, trajectory): def _add_trace(self, trajectory): new_trace = np.array(trajectory.trace) self._sum_trace += new_trace - self._sum2_trace += np.abs(new_trace)**2 + self._sum2_trace += np.abs(new_trace) ** 2 avg = self._sum_trace / self.num_trajectories avg2 = self._sum2_trace / self.num_trajectories self.average_trace = avg - self.std_trace = np.sqrt(np.abs(avg2 - np.abs(avg)**2)) + self.std_trace = np.sqrt(np.abs(avg2 - np.abs(avg) ** 2)) - if self.options['keep_runs_results']: + if self.options["keep_runs_results"]: self.runs_trace.append(trajectory.trace) @property diff --git a/qutip/solver/sode/rouchon.py b/qutip/solver/sode/rouchon.py index a00d8e41de..61c30c0996 100644 --- a/qutip/solver/sode/rouchon.py +++ b/qutip/solver/sode/rouchon.py @@ -1,4 +1,5 @@ import numpy as np +import warnings from qutip import unstack_columns, stack_columns from qutip.core import data as _data from ..stochastic import StochasticSolver @@ -97,12 +98,16 @@ def set_state(self, t, state0, generator): def integrate(self, t, copy=True): delta_t = (t - self.t) + dt = 
self.options["dt"] if delta_t < 0: raise ValueError("Stochastic integration need increasing times") - elif delta_t == 0: - return self.t, self.state, np.zeros() + elif delta_t < 0.5 * dt: + warnings.warn( + f"Step under minimum step ({dt}), skipped.", + RuntimeWarning + ) + return self.t, self.state, np.zeros(len(self.sc_ops)) - dt = self.options["dt"] N, extra = np.divmod(delta_t, dt) N = int(N) if extra > 0.5 * dt: diff --git a/qutip/solver/sode/sode.py b/qutip/solver/sode/sode.py index 33ffaae96a..e43e8f5ac1 100644 --- a/qutip/solver/sode/sode.py +++ b/qutip/solver/sode/sode.py @@ -1,4 +1,5 @@ import numpy as np +import warnings from . import _sode from ..integrator.integrator import Integrator from ..stochastic import StochasticSolver, SMESolver @@ -135,12 +136,16 @@ def __init__(self, rhs, options): def integrate(self, t, copy=True): delta_t = t - self.t + dt = self.options["dt"] if delta_t < 0: - raise ValueError("Stochastic integration time") - elif delta_t == 0: + raise ValueError("Integration time, can't be negative.") + elif delta_t < 0.5 * dt: + warnings.warn( + f"Step under minimum step ({dt}), skipped.", + RuntimeWarning + ) return self.t, self.state, np.zeros(self.N_dw) - dt = self.options["dt"] N, extra = np.divmod(delta_t, dt) N = int(N) if extra > 0.5 * dt: diff --git a/qutip/solver/stochastic.py b/qutip/solver/stochastic.py index f548b2a907..b2f81e57df 100644 --- a/qutip/solver/stochastic.py +++ b/qutip/solver/stochastic.py @@ -1,15 +1,15 @@ __all__ = ["smesolve", "SMESolver", "ssesolve", "SSESolver"] -from .sode.ssystem import * +from .sode.ssystem import StochasticOpenSystem, StochasticClosedSystem from .result import MultiTrajResult, Result, ExpectOp from .multitraj import MultiTrajSolver -from .. import Qobj, QobjEvo, liouvillian, lindblad_dissipator +from .. import Qobj, QobjEvo import numpy as np -from collections.abc import Iterable from functools import partial from .solver_base import _solver_deprecation from ._feedback import _QobjFeedback, _DataFeedback, _WeinerFeedback + class StochasticTrajResult(Result): def _post_init(self, m_ops=(), dw_factor=(), heterodyne=False): super()._post_init() @@ -333,10 +333,10 @@ def smesolve( - | method : str | Which stochastic differential equation integration method to use. Main ones are {"euler", "rouchon", "platen", "taylor1.5_imp"} - - | map : str {"serial", "parallel", "loky"} - | How to run the trajectories. "parallel" uses concurent module to - run in parallel while "loky" use the module of the same name to do - so. + - | map : str {"serial", "parallel", "loky", "mpi"} + | How to run the trajectories. "parallel" uses the multiprocessing + module to run in parallel while "loky" and "mpi" use the "loky" and + "mpi4py" modules to do so. - | num_cpus : NoneType, int | Number of cpus to use when running in parallel. ``None`` detect the number of available cpus. @@ -344,8 +344,11 @@ def smesolve( | The finite steps lenght for the Stochastic integration method. Default change depending on the integrator. - Other options could be supported depending on the integration method, - see `SIntegrator <./classes.html#classes-sode>`_. + Additional options are listed under + `options <./classes.html#qutip.solver.stochastic.SMESolver.options>`__. + More options may be available depending on the selected + differential equation integration method, see + `SIntegrator <./classes.html#classes-sode>`_. Returns ------- @@ -453,10 +456,10 @@ def ssesolve( - | method : str | Which stochastic differential equation integration method to use. 
Main ones are {"euler", "rouchon", "platen", "taylor1.5_imp"} - - | map : str {"serial", "parallel", "loky"} - How to run the trajectories. "parallel" uses concurent module to - run in parallel while "loky" use the module of the same name to do - so. + - | map : str {"serial", "parallel", "loky", "mpi"} + | How to run the trajectories. "parallel" uses the multiprocessing + module to run in parallel while "loky" and "mpi" use the "loky" and + "mpi4py" modules to do so. - | num_cpus : NoneType, int | Number of cpus to use when running in parallel. ``None`` detect the number of available cpus. @@ -464,8 +467,11 @@ def ssesolve( | The finite steps lenght for the Stochastic integration method. Default change depending on the integrator. - Other options could be supported depending on the integration method, - see `SIntegrator <./classes.html#classes-sode>`_. + Additional options are listed under + `options <./classes.html#qutip.solver.stochastic.SSESolver.options>`__. + More options may be available depending on the selected + differential equation integration method, see + `SIntegrator <./classes.html#classes-sode>`_. Returns ------- @@ -498,13 +504,14 @@ class StochasticSolver(MultiTrajSolver): "progress_kwargs": {"chunk_size": 10}, "store_final_state": False, "store_states": None, - "store_measurement": False, "keep_runs_results": False, "normalize_output": False, - "method": "taylor1.5", "map": "serial", + "mpi_options": {}, "num_cpus": None, "bitgenerator": None, + "method": "platen", + "store_measurement": False, } def __init__(self, H, sc_ops, heterodyne, *, c_ops=(), options=None): @@ -671,13 +678,22 @@ def options(self): Whether to store results from all trajectories or just store the averages. + normalize_output: bool + Normalize output state to hide ODE numerical errors. + method: str, default: "platen" - Which ODE integrator methods are supported. + Which differential equation integration method to use. + + map: str {"serial", "parallel", "loky", "mpi"}, default: "serial" + How to run the trajectories. "parallel" uses the multiprocessing + module to run in parallel while "loky" and "mpi" use the "loky" and + "mpi4py" modules to do so. - map: str {"serial", "parallel", "loky"}, default: "serial" - How to run the trajectories. "parallel" uses concurent module to - run in parallel while "loky" use the module of the same name to do - so. + mpi_options: dict, default: {} + Only applies if map is "mpi". This dictionary will be passed as + keyword arguments to the `mpi4py.futures.MPIPoolExecutor` + constructor. Note that the `max_workers` argument is provided + separately through the `num_cpus` option. num_cpus: None, int, default: None Number of cpus to use when running in parallel. 
``None`` detect the @@ -783,13 +799,14 @@ class SMESolver(StochasticSolver): "progress_kwargs": {"chunk_size": 10}, "store_final_state": False, "store_states": None, - "store_measurement": False, "keep_runs_results": False, "normalize_output": False, - "method": "platen", "map": "serial", + "mpi_options": {}, "num_cpus": None, "bitgenerator": None, + "method": "platen", + "store_measurement": False, } @@ -826,11 +843,12 @@ class SSESolver(StochasticSolver): "progress_kwargs": {"chunk_size": 10}, "store_final_state": False, "store_states": None, - "store_measurement": False, "keep_runs_results": False, "normalize_output": False, - "method": "platen", "map": "serial", + "mpi_options": {}, "num_cpus": None, "bitgenerator": None, + "method": "platen", + "store_measurement": False, } diff --git a/qutip/tests/core/test_dimensions.py b/qutip/tests/core/test_dimensions.py index 0686e28b71..7be9d27600 100644 --- a/qutip/tests/core/test_dimensions.py +++ b/qutip/tests/core/test_dimensions.py @@ -162,3 +162,30 @@ def test_super(self, base, expected): def test_bad_dims(dims_list): with pytest.raises(ValueError): Dimensions([dims_list, [1]]) + + +@pytest.mark.parametrize("space_l", [[1], [2], [2, 3]]) +@pytest.mark.parametrize("space_m", [[1], [2], [2, 3]]) +@pytest.mark.parametrize("space_r", [[1], [2], [2, 3]]) +def test_dims_matmul(space_l, space_m, space_r): + dims_l = Dimensions([space_l, space_m]) + dims_r = Dimensions([space_m, space_r]) + assert dims_l @ dims_r == Dimensions([space_l, space_r]) + + +def test_dims_matmul_bad(): + dims_l = Dimensions([[1], [3]]) + dims_r = Dimensions([[2], [2]]) + with pytest.raises(TypeError): + dims_l @ dims_r + + +def test_dims_comparison(): + assert Dimensions([[1], [2]]) == Dimensions([[1], [2]]) + assert not Dimensions([[1], [2]]) != Dimensions([[1], [2]]) + assert Dimensions([[1], [2]]) != Dimensions([[2], [1]]) + assert not Dimensions([[1], [2]]) == Dimensions([[2], [1]]) + assert Dimensions([[1], [2]])[1] == Dimensions([[1], [2]])[1] + assert Dimensions([[1], [2]])[0] != Dimensions([[1], [2]])[1] + assert not Dimensions([[1], [2]])[1] != Dimensions([[1], [2]])[1] + assert not Dimensions([[1], [2]])[0] != Dimensions([[1], [2]])[0] diff --git a/qutip/tests/core/test_operators.py b/qutip/tests/core/test_operators.py index 46fa020481..625385def3 100644 --- a/qutip/tests/core/test_operators.py +++ b/qutip/tests/core/test_operators.py @@ -320,10 +320,19 @@ def test_qeye_like(dims, superrep, dtype): expected = qutip.qeye(dims, dtype=dtype) expected.superrep = superrep assert new == expected + assert new.dtype is qutip.data.to.parse(dtype) opevo = qutip.QobjEvo(op) new = qutip.qeye_like(op) assert new == expected + assert new.dtype is qutip.data.to.parse(dtype) + + +def test_qeye_like_error(): + with pytest.raises(ValueError) as err: + qutip.qeye_like(qutip.basis(3)) + + assert "non square matrix" in str(err.value) @pytest.mark.parametrize(["dims", "superrep"], [ @@ -340,10 +349,12 @@ def test_qzero_like(dims, superrep, dtype): expected = qutip.qzero(dims, dtype=dtype) expected.superrep = superrep assert new == expected + assert new.dtype is qutip.data.to.parse(dtype) opevo = qutip.QobjEvo(op) new = qutip.qzero_like(op) assert new == expected + assert new.dtype is qutip.data.to.parse(dtype) @pytest.mark.parametrize('n_sites', [2, 3, 4, 5]) diff --git a/qutip/tests/core/test_qobj.py b/qutip/tests/core/test_qobj.py index 9ad09fe6fa..7aa7dc0b3b 100644 --- a/qutip/tests/core/test_qobj.py +++ b/qutip/tests/core/test_qobj.py @@ -1262,3 +1262,9 @@ def test_data_as(): 
with pytest.raises(ValueError) as err: qobj.data_as("ndarray") assert "dia_matrix" in str(err.value) + + +@pytest.mark.parametrize('dtype', ["CSR", "Dense"]) +def test_qobj_dtype(dtype): + obj = qutip.qeye(2, dtype=dtype) + assert obj.dtype == qutip.data.to.parse(dtype) \ No newline at end of file diff --git a/qutip/tests/core/test_qobjevo.py b/qutip/tests/core/test_qobjevo.py index 9fc8454d15..6fb6211d64 100644 --- a/qutip/tests/core/test_qobjevo.py +++ b/qutip/tests/core/test_qobjevo.py @@ -6,6 +6,7 @@ rand_herm, rand_ket, liouvillian, basis, spre, spost, to_choi, expect, rand_ket, rand_dm, operator_to_vector, SESolver, MESolver ) +import qutip.core.data as _data import numpy as np from numpy.testing import assert_allclose @@ -623,3 +624,18 @@ def test_feedback_super(): checker.state = rand_dm(4) checker.state.dims = [[[2],[2]], [[2],[2]]] qevo.matmul_data(0, checker.state.data) + + +@pytest.mark.parametrize('dtype', ["CSR", "Dense"]) +def test_qobjevo_dtype(dtype): + obj = QobjEvo([qeye(2, dtype=dtype), [num(2, dtype=dtype), lambda t: t]]) + assert obj.dtype == _data.to.parse(dtype) + + obj = QobjEvo(lambda t: qeye(2, dtype=dtype)) + assert obj.dtype == _data.to.parse(dtype) + + +def test_qobjevo_mixed(): + obj = QobjEvo([qeye(2, dtype="CSR"), [num(2, dtype="Dense"), lambda t: t]]) + # We test that the output dtype is a know type: accepted by `to.parse`. + _data.to.parse(obj.dtype) \ No newline at end of file diff --git a/qutip/tests/solver/test_parallel.py b/qutip/tests/solver/test_parallel.py index bc3af0d62f..a666ab8456 100644 --- a/qutip/tests/solver/test_parallel.py +++ b/qutip/tests/solver/test_parallel.py @@ -4,7 +4,7 @@ import threading from qutip.solver.parallel import ( - parallel_map, serial_map, loky_pmap, MapExceptions + parallel_map, serial_map, loky_pmap, mpi_pmap, MapExceptions ) @@ -22,6 +22,7 @@ def _func2(x, a, b, c, d=0, e=0, f=0): @pytest.mark.parametrize('map', [ pytest.param(parallel_map, id='parallel_map'), pytest.param(loky_pmap, id='loky_pmap'), + pytest.param(mpi_pmap, id='mpi_pmap'), pytest.param(serial_map, id='serial_map'), ]) @pytest.mark.parametrize('num_cpus', @@ -29,7 +30,9 @@ def _func2(x, a, b, c, d=0, e=0, f=0): ids=['1', '2']) def test_map(map, num_cpus): if map is loky_pmap: - loky = pytest.importorskip("loky") + pytest.importorskip("loky") + if map is mpi_pmap: + pytest.importorskip("mpi4py") args = (1, 2, 3) kwargs = {'d': 4, 'e': 5, 'f': 6} @@ -48,6 +51,7 @@ def test_map(map, num_cpus): @pytest.mark.parametrize('map', [ pytest.param(parallel_map, id='parallel_map'), pytest.param(loky_pmap, id='loky_pmap'), + pytest.param(mpi_pmap, id='mpi_pmap'), pytest.param(serial_map, id='serial_map'), ]) @pytest.mark.parametrize('num_cpus', @@ -55,7 +59,10 @@ def test_map(map, num_cpus): ids=['1', '2']) def test_map_accumulator(map, num_cpus): if map is loky_pmap: - loky = pytest.importorskip("loky") + pytest.importorskip("loky") + if map is mpi_pmap: + pytest.importorskip("mpi4py") + args = (1, 2, 3) kwargs = {'d': 4, 'e': 5, 'f': 6} map_kw = { @@ -84,28 +91,40 @@ def func(i): @pytest.mark.parametrize('map', [ pytest.param(parallel_map, id='parallel_map'), pytest.param(loky_pmap, id='loky_pmap'), + pytest.param(mpi_pmap, id='mpi_pmap'), pytest.param(serial_map, id='serial_map'), ]) def test_map_pass_error(map): + kwargs = {} if map is loky_pmap: - loky = pytest.importorskip("loky") + pytest.importorskip("loky") + if map is mpi_pmap: + pytest.importorskip("mpi4py") + # do not use default value for num_cpus for mpi_pmap + kwargs = {'map_kw': {'num_cpus': 1}} 
with pytest.raises(CustomException) as err: - map(func, range(10)) + map(func, range(10), **kwargs) assert "Error in subprocess" in str(err.value) @pytest.mark.parametrize('map', [ pytest.param(parallel_map, id='parallel_map'), pytest.param(loky_pmap, id='loky_pmap'), + pytest.param(mpi_pmap, id='mpi_pmap'), pytest.param(serial_map, id='serial_map'), ]) def test_map_store_error(map): + map_kw = {"fail_fast": False} if map is loky_pmap: - loky = pytest.importorskip("loky") + pytest.importorskip("loky") + if map is mpi_pmap: + pytest.importorskip("mpi4py") + # do not use default value for num_cpus for mpi_pmap + map_kw.update({'num_cpus': 1}) with pytest.raises(MapExceptions) as err: - map(func, range(10), map_kw={"fail_fast": False}) + map(func, range(10), map_kw=map_kw) map_error = err.value assert "iterations failed" in str(map_error) for iter, error in map_error.errors.items(): @@ -122,11 +141,17 @@ def test_map_store_error(map): @pytest.mark.parametrize('map', [ pytest.param(parallel_map, id='parallel_map'), pytest.param(loky_pmap, id='loky_pmap'), + pytest.param(mpi_pmap, id='mpi_pmap'), pytest.param(serial_map, id='serial_map'), ]) def test_map_early_end(map): + kwargs = {} if map is loky_pmap: - loky = pytest.importorskip("loky") + pytest.importorskip("loky") + if map is mpi_pmap: + pytest.importorskip("mpi4py") + # do not use default value for num_cpus for mpi_pmap + kwargs = {'map_kw': {'num_cpus': 1}} results = [] @@ -134,6 +159,6 @@ def reduce_func(result): results.append(result) return 5 - len(results) - map(_func1, range(100), reduce_func=reduce_func) + map(_func1, range(100), reduce_func=reduce_func, **kwargs) assert len(results) < 100 diff --git a/qutip/tests/solver/test_steadystate.py b/qutip/tests/solver/test_steadystate.py index 74287a81b3..161e904e95 100644 --- a/qutip/tests/solver/test_steadystate.py +++ b/qutip/tests/solver/test_steadystate.py @@ -1,7 +1,9 @@ import numpy as np +import scipy import pytest import qutip import warnings +from packaging import version as pac_version @pytest.mark.parametrize(['method', 'kwargs'], [ @@ -36,6 +38,13 @@ def test_qubit(method, kwargs, dtype): sz = qutip.sigmaz().to(dtype) sm = qutip.destroy(2, dtype=dtype) + if ( + pac_version.parse(scipy.__version__) >= pac_version.parse("1.12") + and "tol" in kwargs + ): + # From scipy 1.12, the tol keyword is renamed to rtol + kwargs["rtol"] = kwargs.pop("tol") + H = 0.5 * 2 * np.pi * sz gamma1 = 0.05 @@ -94,6 +103,13 @@ def test_exact_solution_for_simple_methods(method, kwargs): def test_ho(method, kwargs): # thermal steadystate of an oscillator: compare numerics with analytical # formula + if ( + pac_version.parse(scipy.__version__) >= pac_version.parse("1.12") + and "tol" in kwargs + ): + # From scipy 1.12, the tol keyword is renamed to rtol + kwargs["rtol"] = kwargs.pop("tol") + a = qutip.destroy(30) H = 0.5 * 2 * np.pi * a.dag() * a gamma1 = 0.05 @@ -130,6 +146,13 @@ def test_ho(method, kwargs): pytest.param('iterative-bicgstab', {"atol": 1e-10, "tol": 1e-10}, id="iterative-bicgstab"), ]) def test_driven_cavity(method, kwargs): + if ( + pac_version.parse(scipy.__version__) >= pac_version.parse("1.12") + and "tol" in kwargs + ): + # From scipy 1.12, the tol keyword is renamed to rtol + kwargs["rtol"] = kwargs.pop("tol") + N = 30 Omega = 0.01 * 2 * np.pi Gamma = 0.05 diff --git a/qutip/tests/solver/test_stochastic.py b/qutip/tests/solver/test_stochastic.py index ebbf194daf..dc04b966f1 100644 --- a/qutip/tests/solver/test_stochastic.py +++ b/qutip/tests/solver/test_stochastic.py @@ -348,23 
+348,32 @@ def func(t, A, W): def test_deprecation_warnings(): with pytest.warns(FutureWarning, match=r'map_func'): - ssesolve(qeye(2), basis(2), [0, 1e-5], [qeye(2)], map_func=None) + ssesolve(qeye(2), basis(2), [0, 0.01], [qeye(2)], map_func=None) with pytest.warns(FutureWarning, match=r'progress_bar'): - ssesolve(qeye(2), basis(2), [0, 1e-5], [qeye(2)], progress_bar=None) + ssesolve(qeye(2), basis(2), [0, 0.01], [qeye(2)], progress_bar=None) with pytest.warns(FutureWarning, match=r'nsubsteps'): - ssesolve(qeye(2), basis(2), [0, 1e-5], [qeye(2)], nsubsteps=None) + ssesolve(qeye(2), basis(2), [0, 0.01], [qeye(2)], nsubsteps=None) with pytest.warns(FutureWarning, match=r'map_func'): - ssesolve(qeye(2), basis(2), [0, 1e-5], [qeye(2)], map_func=None) + ssesolve(qeye(2), basis(2), [0, 0.01], [qeye(2)], map_func=None) with pytest.warns(FutureWarning, match=r'store_all_expect'): - ssesolve(qeye(2), basis(2), [0, 1e-5], [qeye(2)], store_all_expect=1) + ssesolve(qeye(2), basis(2), [0, 0.01], [qeye(2)], store_all_expect=1) with pytest.warns(FutureWarning, match=r'store_measurement'): - ssesolve(qeye(2), basis(2), [0, 1e-5], [qeye(2)], store_measurement=1) + ssesolve(qeye(2), basis(2), [0, 0.01], [qeye(2)], store_measurement=1) with pytest.raises(TypeError) as err: - ssesolve(qeye(2), basis(2), [0, 1e-5], [qeye(2)], m_ops=1) + ssesolve(qeye(2), basis(2), [0, 0.01], [qeye(2)], m_ops=1) assert '"m_ops" and "dW_factors"' in str(err.value) + + +@pytest.mark.parametrize("method", ["euler", "rouchon"]) +def test_small_step_warnings(method): + with pytest.warns(RuntimeWarning, match=r'under minimum'): + smesolve( + qeye(2), basis(2), [0, 0.0000001], [qeye(2)], + options={"method": method} + ) diff --git a/qutip/tests/test_animation.py b/qutip/tests/test_animation.py index 676e7473a4..0433a38608 100644 --- a/qutip/tests/test_animation.py +++ b/qutip/tests/test_animation.py @@ -1,10 +1,11 @@ import pytest import qutip import numpy as np -import matplotlib as mpl -import matplotlib.pyplot as plt from scipy.special import sph_harm +mpl = pytest.importorskip("matplotlib") +plt = pytest.importorskip("matplotlib.pyplot") + def test_result_state(): H = qutip.rand_dm(2) tlist = np.linspace(0, 3*np.pi, 2) diff --git a/qutip/tests/test_ipynbtools.py b/qutip/tests/test_ipynbtools.py index 2b1c76559a..2227e0c784 100644 --- a/qutip/tests/test_ipynbtools.py +++ b/qutip/tests/test_ipynbtools.py @@ -1,6 +1,7 @@ -from qutip.ipynbtools import version_table import pytest +pytest.importorskip("IPython") +from qutip.ipynbtools import version_table @pytest.mark.parametrize('verbose', [False, True]) def test_version_table(verbose): diff --git a/qutip/tests/test_visualization.py b/qutip/tests/test_visualization.py index 2d2901e1be..ebf92b4d11 100644 --- a/qutip/tests/test_visualization.py +++ b/qutip/tests/test_visualization.py @@ -1,10 +1,10 @@ import pytest import qutip import numpy as np -import matplotlib as mpl -import matplotlib.pyplot as plt from scipy.special import sph_harm +mpl = pytest.importorskip("matplotlib") +plt = pytest.importorskip("matplotlib.pyplot") def test_cyclic(): qutip.settings.colorblind_safe = True diff --git a/requirements.txt b/requirements.txt index 41d2c09a1b..9fc7beade5 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ cython>=0.29.20 -numpy>=1.16.6 -scipy>=1.5 +numpy>=1.22 +scipy>=1.8 packaging diff --git a/setup.cfg b/setup.cfg index d5373c9c85..ecb3cc0ee4 100644 --- a/setup.cfg +++ b/setup.cfg @@ -31,12 +31,12 @@ packages = find: include_package_data = True zip_safe = False 
install_requires = - numpy>=1.16.6 - scipy>=1.0 + numpy>=1.22 + scipy>=1.8,<1.12 packaging setup_requires = - numpy>=1.13.3 - scipy>=1.0 + numpy>=1.19 + scipy>=1.8 cython>=0.29.20; python_version>='3.10' cython>=0.29.20,<3.0.3; python_version<='3.9' packaging @@ -61,6 +61,8 @@ ipython = extras = loky tqdm +mpi = + mpi4py ; This uses ConfigParser's string interpolation to include all the above ; dependencies into one single target, convenient for testing full builds. full =
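A minimal usage sketch of the new ``mpi_pmap`` (not part of the patch). It assumes ``mpi4py`` is installed (the new ``mpi`` extra) and that the script is started through an MPI launcher, e.g. ``mpiexec -n 1 python -m mpi4py.futures script.py``; the task function and values below are illustrative placeholders.

from qutip.solver.parallel import mpi_pmap

def scale(x, a, b):
    # illustrative task, evaluated once per entry of ``values``
    return a * x + b

# Passing num_cpus explicitly avoids the warning this patch emits when no
# worker count is given; it is forwarded as MPIPoolExecutor(max_workers=4).
results = mpi_pmap(
    scale, range(8), task_args=(2, 1),
    map_kw={"num_cpus": 4, "timeout": 60, "fail_fast": True},
)
# results == [scale(x, 2, 1) for x in range(8)]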
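At the solver level, the same machinery is selected through the new ``map`` and ``mpi_options`` entries documented in the solver options above. A sketch using ``smesolve`` with placeholder operators and time grid:

import numpy as np
import qutip

H = qutip.sigmaz()
sc_ops = [np.sqrt(0.1) * qutip.sigmam()]

result = qutip.smesolve(
    H, qutip.basis(2, 0), np.linspace(0, 1, 101),
    sc_ops=sc_ops, ntraj=100,
    options={
        "map": "mpi",        # run trajectories through mpi4py.futures
        "num_cpus": 4,       # forwarded as MPIPoolExecutor(max_workers=4)
        "mpi_options": {},   # extra MPIPoolExecutor keyword arguments
    },
)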
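The reworked result classes can be exercised with a short sketch as well: after this patch ``final_state`` is reconstructed lazily from the stored states when it was not stored separately (the solver call is only an example).

import numpy as np
import qutip

res = qutip.sesolve(
    qutip.sigmaz(), qutip.basis(2, 0), np.linspace(0, 1, 11),
    options={"store_states": True, "store_final_state": False},
)
# _final_state was never filled, so the property falls back to states[-1]
assert res.final_state == res.states[-1]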