From 2a80e983664ff7273dc5f465f0c9a9b2050bfe47 Mon Sep 17 00:00:00 2001 From: Colin Date: Wed, 8 Nov 2023 11:50:43 -0800 Subject: [PATCH 01/50] Adding more papers --- docs/papers.md | 57 +++++++++++++++++++++++++++++++------------------- 1 file changed, 35 insertions(+), 22 deletions(-) diff --git a/docs/papers.md b/docs/papers.md index 4b1bc6b8d..234cede09 100644 --- a/docs/papers.md +++ b/docs/papers.md @@ -5,57 +5,70 @@ Please email clophus@lbl.gov if you have used py4DSTEM for analysis and your paper is not listed below! -### 2022 (9) +### 2023 (0) -[Correlative image learning of chemo-mechanics in phase-transforming solids](https://www.nature.com/articles/s41563-021-01191-0), Nature Materials (2022) -[Correlative analysis of structure and chemistry of LixFePO4 platelets using 4D-STEM and X-ray ptychography](https://doi.org/10.1016/j.mattod.2021.10.031), Materials Today 52, 102 (2022). -[Visualizing Grain Statistics in MOCVD WSe2 through Four-Dimensional Scanning Transmission Electron Microscopy](https://doi.org/10.1021/acs.nanolett.1c04315), Nano Letters 22, 2578 (2022). +### 2022 (13) -[Electric field control of chirality](https://doi.org/10.1126/sciadv.abj8030), Science Advances 8 (2022). -[Real-Time Interactive 4D-STEM Phase-Contrast Imaging From Electron Event Representation Data: Less computation with the right representation](https://doi.org/10.1109/MSP.2021.3120981), IEEE Signal Processing Magazine 39, 25 (2022). -[Microstructural dependence of defect formation in iron-oxide thin films](https://doi.org/10.1016/j.apsusc.2022.152844), Applied Surface Science 589, 152844 (2022). 
+[Disentangling multiple scattering with deep learning: application to strain mapping from electron diffraction patterns](https://doi.org/10.1038/s41524-022-00939-9), J Munshi*, A Rakowski*, et al., npj Computational Materials 8, 254 (2022) -[Chemical and Structural Alterations in the Amorphous Structure of Obsidian due to Nanolites](https://doi.org/10.1017/S1431927621013957), Microscopy and Microanalysis 28, 289 (2022). +[Defect Contrast with 4D-STEM: Understanding Crystalline Order with Virtual Detectors and Beam Modification](https://doi.org/10.1093/micmic/ozad045) SM Ribet et al., Microscopy and Microanalysis 29, 1087 (2023). -[Nanoscale characterization of crystalline and amorphous phases in silicon oxycarbide ceramics using 4D-STEM](https://doi.org/10.1016/j.matchar.2021.111512), Materials Characterization 181, 111512 (2021). +[Structural heterogeneity in non-crystalline TexSe1−x thin films](https://doi.org/10.1063/5.0094600), B Sari et al., Applied Physics Letters 121, 012101 (2022) -[Disentangling multiple scattering with deep learning: application to strain mapping from electron diffraction patterns](https://arxiv.org/abs/2202.00204), arXiv:2202.00204 (2022). 
+[Cryogenic 4D-STEM analysis of an amorphouscrystalline polymer blend: Combined nanocrystalline and amorphous phase mapping](https://doi.org/10.1016/j.isci.2022.103882), J Donohue et al., iScience 25, 103882 (2022) + +[Developing a Chemical and Structural Understanding of the Surface Oxide in a Niobium Superconducting Qubit](https://doi.org/10.1021/acsnano.2c07913), AA Murthy et al., ACS Nano 16, 17257 (2022) + +[Correlative image learning of chemo-mechanics in phase-transforming solids](https://www.nature.com/articles/s41563-021-01191-0), HD Deng et al., Nature Materials (2022) + +[Correlative analysis of structure and chemistry of LixFePO4 platelets using 4D-STEM and X-ray ptychography](https://doi.org/10.1016/j.mattod.2021.10.031), LA Hughes*, BH Savitzky, et al., Materials Today 52, 102 (2022) + +[Visualizing Grain Statistics in MOCVD WSe2 through Four-Dimensional Scanning Transmission Electron Microscopy](https://doi.org/10.1021/acs.nanolett.1c04315), A Londoño-Calderon et al., Nano Letters 22, 2578 (2022) + +[Electric field control of chirality](https://doi.org/10.1126/sciadv.abj8030), P Behera et al., Science Advances 8 (2022) + +[Real-Time Interactive 4D-STEM Phase-Contrast Imaging From Electron Event Representation Data: Less computation with the right representation](https://doi.org/10.1109/MSP.2021.3120981), P Pelz et al., IEEE Signal Processing Magazine 39, 25 (2022) + +[Microstructural dependence of defect formation in iron-oxide thin films](https://doi.org/10.1016/j.apsusc.2022.152844), BK Derby et al., Applied Surface Science 589, 152844 (2022) + +[Chemical and Structural Alterations in the Amorphous Structure of Obsidian due to Nanolites](https://doi.org/10.1017/S1431927621013957), E Kennedy et al., Microscopy and Microanalysis 28, 289 (2022) + +[Nanoscale characterization of crystalline and amorphous phases in silicon oxycarbide ceramics using 4D-STEM](https://doi.org/10.1016/j.matchar.2021.111512), Ni Yang et al., Materials Characterization 181, 
111512 (2021) ### 2021 (10) -[Cryoforged nanotwinned titanium with ultrahigh strength and ductility](https://doi.org/10.1126/science.abe7252), Science 16, 373, 1363 (2021). +[Cryoforged nanotwinned titanium with ultrahigh strength and ductility](https://doi.org/10.1126/science.abe7252), Science 16, 373, 1363 (2021) -[Strain fields in twisted bilayer graphene](https://doi.org/10.1038/s41563-021-00973-w), Nature Materials 20, 956 (2021). +[Strain fields in twisted bilayer graphene](https://doi.org/10.1038/s41563-021-00973-w), Nature Materials 20, 956 (2021) -[Determination of Grain-Boundary Structure and Electrostatic Characteristics in a SrTiO3 Bicrystal by Four-Dimensional Electron Microscopy](https://doi.org/10.1021/acs.nanolett.1c02960), Nanoletters 21, 9138 (2021). +[Determination of Grain-Boundary Structure and Electrostatic Characteristics in a SrTiO3 Bicrystal by Four-Dimensional Electron Microscopy](https://doi.org/10.1021/acs.nanolett.1c02960), Nanoletters 21, 9138 (2021) [Local Lattice Deformation of Tellurene Grain Boundaries by Four-Dimensional Electron Microscopy](https://pubs.acs.org/doi/10.1021/acs.jpcc.1c00308), Journal of Physical Chemistry C 125, 3396 (2021). -[Extreme mixing in nanoscale transition metal alloys](https://doi.org/10.1016/j.matt.2021.04.014), Matter 4, 2340 (2021). +[Extreme mixing in nanoscale transition metal alloys](https://doi.org/10.1016/j.matt.2021.04.014), Matter 4, 2340 (2021) -[Multibeam Electron Diffraction](https://doi.org/10.1017/S1431927620024770), Microscopy and Microanalysis 27, 129 (2021). +[Multibeam Electron Diffraction](https://doi.org/10.1017/S1431927620024770), Microscopy and Microanalysis 27, 129 (2021) -[A Fast Algorithm for Scanning Transmission Electron Microscopy Imaging and 4D-STEM Diffraction Simulations](https://doi.org/10.1017/S1431927621012083), Microscopy and Microanalysis 27, 835 (2021). 
+[A Fast Algorithm for Scanning Transmission Electron Microscopy Imaging and 4D-STEM Diffraction Simulations](https://doi.org/10.1017/S1431927621012083), Microscopy and Microanalysis 27, 835 (2021) -[Fast Grain Mapping with Sub-Nanometer Resolution Using 4D-STEM with Grain Classification by Principal Component Analysis and Non-Negative Matrix Factorization](https://doi.org/10.1017/S1431927621011946), Microscopy and Microanalysis 27, 794 +[Fast Grain Mapping with Sub-Nanometer Resolution Using 4D-STEM with Grain Classification by Principal Component Analysis and Non-Negative Matrix Factorization](https://doi.org/10.1017/S1431927621011946), Microscopy and Microanalysis 27, 794 (2021) -[Prismatic 2.0 – Simulation software for scanning and high resolution transmission electron microscopy (STEM and HRTEM)](https://doi.org/10.1016/j.micron.2021.103141), Micron 151, 103141 (2021). +[Prismatic 2.0 – Simulation software for scanning and high resolution transmission electron microscopy (STEM and HRTEM)](https://doi.org/10.1016/j.micron.2021.103141), Micron 151, 103141 (2021) -[4D-STEM of Beam-Sensitive Materials](https://doi.org/10.1021/acs.accounts.1c00073), Accounts of Chemical Research 54, 2543 (2021). +[4D-STEM of Beam-Sensitive Materials](https://doi.org/10.1021/acs.accounts.1c00073), Accounts of Chemical Research 54, 2543 (2021) ### 2020 (3) -[Patterned probes for high precision 4D-STEM bragg measurements](https://doi.org/10.1063/5.0015532), Ultramicroscopy 209, 112890 (2020). - +[Patterned probes for high precision 4D-STEM bragg measurements](https://doi.org/10.1063/5.0015532), Ultramicroscopy 209, 112890 (2020) -[Tilted fluctuation electron microscopy](https://doi.org/10.1063/5.0015532), Applied Physics Letters 117, 091903 (2020). 
+[Tilted fluctuation electron microscopy](https://doi.org/10.1063/5.0015532), Applied Physics Letters 117, 091903 (2020) [4D-STEM elastic stress state characterisation of a TWIP steel nanotwin](https://arxiv.org/abs/2004.03982), arXiv:2004.03982 From 53ad64b5905c92d23b347d321d029ced9ba28d9e Mon Sep 17 00:00:00 2001 From: Colin Date: Wed, 8 Nov 2023 11:59:55 -0800 Subject: [PATCH 02/50] more cites --- docs/papers.md | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/docs/papers.md b/docs/papers.md index 234cede09..4539dd943 100644 --- a/docs/papers.md +++ b/docs/papers.md @@ -9,18 +9,24 @@ Please email clophus@lbl.gov if you have used py4DSTEM for analysis and your pap -### 2022 (13) - +### 2022 (16) [Disentangling multiple scattering with deep learning: application to strain mapping from electron diffraction patterns](https://doi.org/10.1038/s41524-022-00939-9), J Munshi*, A Rakowski*, et al., npj Computational Materials 8, 254 (2022) +[Flexible CO2 Sensor Architecture with Selective Nitrogen Functionalities by One-Step Laser-Induced Conversion of Versatile Organic Ink](https://doi.org/10.1002/adfm.202207406), H Wang et al., Advanced Functional Materials 32, 2207406 (2022) + [Defect Contrast with 4D-STEM: Understanding Crystalline Order with Virtual Detectors and Beam Modification](https://doi.org/10.1093/micmic/ozad045) SM Ribet et al., Microscopy and Microanalysis 29, 1087 (2023). 
[Structural heterogeneity in non-crystalline TexSe1−x thin films](https://doi.org/10.1063/5.0094600), B Sari et al., Applied Physics Letters 121, 012101 (2022) [Cryogenic 4D-STEM analysis of an amorphouscrystalline polymer blend: Combined nanocrystalline and amorphous phase mapping](https://doi.org/10.1016/j.isci.2022.103882), J Donohue et al., iScience 25, 103882 (2022) +[Hydrogen-assisted decohesion associated with nanosized grain boundary κ-carbides in a high-Mn lightweight steel](https://doi.org/10.1016/j.actamat.2022.118392), MN Elkot et al., Acta Materialia + 241, 118392 (2022) + +[4D-STEM Ptychography for Electron-Beam-Sensitive Materials](https://doi.org/10.1021/acscentsci.2c01137), G Li et al., ACS Central Science 8, 1579 (2022) + [Developing a Chemical and Structural Understanding of the Surface Oxide in a Niobium Superconducting Qubit](https://doi.org/10.1021/acsnano.2c07913), AA Murthy et al., ACS Nano 16, 17257 (2022) [Correlative image learning of chemo-mechanics in phase-transforming solids](https://www.nature.com/articles/s41563-021-01191-0), HD Deng et al., Nature Materials (2022) From cbce25cd754f1af68c6aded6bfe6c329425fcbae Mon Sep 17 00:00:00 2001 From: Stephanie Ribet Date: Tue, 9 Jan 2024 10:02:37 -0800 Subject: [PATCH 03/50] bug fix --- py4DSTEM/process/phase/iterative_parallax.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/py4DSTEM/process/phase/iterative_parallax.py b/py4DSTEM/process/phase/iterative_parallax.py index 34454088a..4b1296c34 100644 --- a/py4DSTEM/process/phase/iterative_parallax.py +++ b/py4DSTEM/process/phase/iterative_parallax.py @@ -1281,7 +1281,7 @@ def subpixel_alignment( if self._DF_upsample_limit < 1: warnings.warn( ( - f"Dark-field upsampling limit of {self._DF_upsampling_limit:.2f} " + f"Dark-field upsampling limit of {self._DF_upsample_limit:.2f} " "is less than 1, implying a scan step-size smaller than Nyquist. " "setting to 1." 
), From d366aca63e2da5a2796acb5637e94876c14bd953 Mon Sep 17 00:00:00 2001 From: Georgios Varnavides Date: Thu, 11 Jan 2024 22:29:29 -0800 Subject: [PATCH 04/50] Revert "bug fix" This reverts commit cbce25cd754f1af68c6aded6bfe6c329425fcbae. --- py4DSTEM/process/phase/iterative_parallax.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/py4DSTEM/process/phase/iterative_parallax.py b/py4DSTEM/process/phase/iterative_parallax.py index 4b1296c34..34454088a 100644 --- a/py4DSTEM/process/phase/iterative_parallax.py +++ b/py4DSTEM/process/phase/iterative_parallax.py @@ -1281,7 +1281,7 @@ def subpixel_alignment( if self._DF_upsample_limit < 1: warnings.warn( ( - f"Dark-field upsampling limit of {self._DF_upsample_limit:.2f} " + f"Dark-field upsampling limit of {self._DF_upsampling_limit:.2f} " "is less than 1, implying a scan step-size smaller than Nyquist. " "setting to 1." ), From c109d95038707c838b789ddd4f53d9c9d58f3d76 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 Jan 2024 19:22:01 +0000 Subject: [PATCH 05/50] Bump tj-actions/changed-files from 41 to 42 Bumps [tj-actions/changed-files](https://github.com/tj-actions/changed-files) from 41 to 42. - [Release notes](https://github.com/tj-actions/changed-files/releases) - [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md) - [Commits](https://github.com/tj-actions/changed-files/compare/v41...v42) --- updated-dependencies: - dependency-name: tj-actions/changed-files dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github/workflows/pypi_upload.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pypi_upload.yml b/.github/workflows/pypi_upload.yml index 906ec5d1a..d0fdbec02 100644 --- a/.github/workflows/pypi_upload.yml +++ b/.github/workflows/pypi_upload.yml @@ -21,7 +21,7 @@ jobs: token: ${{ secrets.GH_ACTION_VERSION_UPDATE }} - name: Get changed files id: changed-files-specific - uses: tj-actions/changed-files@v41 + uses: tj-actions/changed-files@v42 with: files: | py4DSTEM/version.py From fc51379d80ee39cff427aa4859a319c358e2e386 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 Jan 2024 19:22:05 +0000 Subject: [PATCH 06/50] Bump github/super-linter from 4 to 5 Bumps [github/super-linter](https://github.com/github/super-linter) from 4 to 5. - [Changelog](https://github.com/github/super-linter/blob/main/docs/release-process.md) - [Commits](https://github.com/github/super-linter/compare/v4...v5) --- updated-dependencies: - dependency-name: github/super-linter dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/linter.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml index 3e8071f6f..cad57279d 100644 --- a/.github/workflows/linter.yml +++ b/.github/workflows/linter.yml @@ -17,7 +17,7 @@ jobs: fetch-depth: 0 - name: Lint Code Base - uses: github/super-linter@v4 + uses: github/super-linter@v5 env: VALIDATE_ALL_CODEBASE: false VALIDATE_PYTHON_FLAKE8: true From 1598ac2aa76b5fbdfad454d3d061eef03ef4a8b5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 Jan 2024 19:22:09 +0000 Subject: [PATCH 07/50] Bump actions/checkout from 3 to 4 Bumps [actions/checkout](https://github.com/actions/checkout) from 3 to 4. 
- [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v3...v4) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/black.yml | 2 +- .github/workflows/build-flake.yml | 2 +- .github/workflows/check_install_dev.yml | 2 +- .github/workflows/check_install_main.yml | 2 +- .github/workflows/check_install_quick.yml | 2 +- .github/workflows/linter.yml | 2 +- .github/workflows/pypi_upload.yml | 6 +++--- 7 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/black.yml b/.github/workflows/black.yml index 09b2a0fba..6ed2cc1e0 100644 --- a/.github/workflows/black.yml +++ b/.github/workflows/black.yml @@ -10,5 +10,5 @@ jobs: lint: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: psf/black@stable \ No newline at end of file diff --git a/.github/workflows/build-flake.yml b/.github/workflows/build-flake.yml index 3393b7908..ad4a270e7 100644 --- a/.github/workflows/build-flake.yml +++ b/.github/workflows/build-flake.yml @@ -18,7 +18,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up Python 3.10 uses: actions/setup-python@v3 with: diff --git a/.github/workflows/check_install_dev.yml b/.github/workflows/check_install_dev.yml index a960dc2f2..e6df6fee1 100644 --- a/.github/workflows/check_install_dev.yml +++ b/.github/workflows/check_install_dev.yml @@ -28,7 +28,7 @@ jobs: # runs-on: macos-latest # allow_failure: true steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Setup Python ${{ matrix.python-version }} uses: actions/setup-python@v4 diff --git a/.github/workflows/check_install_main.yml b/.github/workflows/check_install_main.yml index 
d27278ba9..c0f0cb810 100644 --- a/.github/workflows/check_install_main.yml +++ b/.github/workflows/check_install_main.yml @@ -28,7 +28,7 @@ jobs: # runs-on: macos-latest # allow_failure: true steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Setup Python ${{ matrix.python-version }} uses: actions/setup-python@v4 diff --git a/.github/workflows/check_install_quick.yml b/.github/workflows/check_install_quick.yml index 0d20bd759..c905c4dd3 100644 --- a/.github/workflows/check_install_quick.yml +++ b/.github/workflows/check_install_quick.yml @@ -28,7 +28,7 @@ jobs: # runs-on: macos-latest # allow_failure: true steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Setup Python ${{ matrix.python-version }} uses: actions/setup-python@v4 diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml index 3e8071f6f..18bd78daa 100644 --- a/.github/workflows/linter.yml +++ b/.github/workflows/linter.yml @@ -11,7 +11,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: # Full git history is needed to get a proper list of changed files within `super-linter` fetch-depth: 0 diff --git a/.github/workflows/pypi_upload.yml b/.github/workflows/pypi_upload.yml index 906ec5d1a..71853a296 100644 --- a/.github/workflows/pypi_upload.yml +++ b/.github/workflows/pypi_upload.yml @@ -15,7 +15,7 @@ jobs: runs-on: ubuntu-latest name: Check if version.py is changed and update if the version.py is not changed steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: fetch-depth: 0 token: ${{ secrets.GH_ACTION_VERSION_UPDATE }} @@ -46,7 +46,7 @@ jobs: name: Sync main with dev steps: - name: Sync main with dev - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: ref: dev fetch-depth: 0 @@ -63,7 +63,7 @@ jobs: runs-on: ubuntu-latest name: Deploy to PyPI steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: ref: dev - name: Set up 
Python From f35f683f19d31872685e3bccafef1f27bf17f7e4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 Jan 2024 19:22:12 +0000 Subject: [PATCH 08/50] Bump actions/setup-python from 2 to 5 Bumps [actions/setup-python](https://github.com/actions/setup-python) from 2 to 5. - [Release notes](https://github.com/actions/setup-python/releases) - [Commits](https://github.com/actions/setup-python/compare/v2...v5) --- updated-dependencies: - dependency-name: actions/setup-python dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/build-flake.yml | 2 +- .github/workflows/check_install_dev.yml | 2 +- .github/workflows/check_install_main.yml | 2 +- .github/workflows/check_install_quick.yml | 2 +- .github/workflows/pypi_upload.yml | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build-flake.yml b/.github/workflows/build-flake.yml index 3393b7908..d077aae8c 100644 --- a/.github/workflows/build-flake.yml +++ b/.github/workflows/build-flake.yml @@ -20,7 +20,7 @@ jobs: steps: - uses: actions/checkout@v3 - name: Set up Python 3.10 - uses: actions/setup-python@v3 + uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install dependencies diff --git a/.github/workflows/check_install_dev.yml b/.github/workflows/check_install_dev.yml index a960dc2f2..db5a175ce 100644 --- a/.github/workflows/check_install_dev.yml +++ b/.github/workflows/check_install_dev.yml @@ -31,7 +31,7 @@ jobs: - uses: actions/checkout@v3 - name: Setup Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - name: Install repo diff --git a/.github/workflows/check_install_main.yml b/.github/workflows/check_install_main.yml index d27278ba9..be262ea21 100644 --- a/.github/workflows/check_install_main.yml +++ 
b/.github/workflows/check_install_main.yml @@ -31,7 +31,7 @@ jobs: - uses: actions/checkout@v3 - name: Setup Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - name: Install repo diff --git a/.github/workflows/check_install_quick.yml b/.github/workflows/check_install_quick.yml index 0d20bd759..8815c3e6e 100644 --- a/.github/workflows/check_install_quick.yml +++ b/.github/workflows/check_install_quick.yml @@ -31,7 +31,7 @@ jobs: - uses: actions/checkout@v3 - name: Setup Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - name: Install repo diff --git a/.github/workflows/pypi_upload.yml b/.github/workflows/pypi_upload.yml index 906ec5d1a..4f94bf864 100644 --- a/.github/workflows/pypi_upload.yml +++ b/.github/workflows/pypi_upload.yml @@ -67,7 +67,7 @@ jobs: with: ref: dev - name: Set up Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v5 with: python-version: 3.8 - name: Install dependencies From fd8ee7b32d64bfd7474c715a969677c820977fe9 Mon Sep 17 00:00:00 2001 From: Stephanie Ribet Date: Sun, 28 Jan 2024 09:48:43 -0800 Subject: [PATCH 09/50] num_probes_fit_aberrations --- py4DSTEM/process/phase/mixedstate_multislice_ptychography.py | 4 ++++ py4DSTEM/process/phase/mixedstate_ptychography.py | 4 ++++ py4DSTEM/process/phase/ptychographic_constraints.py | 5 ++++- 3 files changed, 12 insertions(+), 1 deletion(-) diff --git a/py4DSTEM/process/phase/mixedstate_multislice_ptychography.py b/py4DSTEM/process/phase/mixedstate_multislice_ptychography.py index 47dd67dd3..be87e9496 100644 --- a/py4DSTEM/process/phase/mixedstate_multislice_ptychography.py +++ b/py4DSTEM/process/phase/mixedstate_multislice_ptychography.py @@ -715,6 +715,7 @@ def reconstruct( fit_probe_aberrations_max_radial_order: int = 4, fit_probe_aberrations_remove_initial: bool = False, 
fit_probe_aberrations_using_scikit_image: bool = True, + num_probes_fit_aberrations: int = np.inf, butterworth_filter: bool = True, q_lowpass: float = None, q_highpass: float = None, @@ -814,6 +815,8 @@ def reconstruct( If true, the necessary phase unwrapping is performed using scikit-image. This is more stable, but occasionally leads to a documented bug where the kernel hangs.. If false, a poisson-based solver is used for phase unwrapping. This won't hang, but tends to underestimate aberrations. + num_probes_fit_aberrations: int + The number of probes on which to apply the probe fitting butterworth_filter: bool, optional If True and q_lowpass or q_highpass is not None, object is smoothed using butterworth filtering q_lowpass: float @@ -1033,6 +1036,7 @@ def reconstruct( fit_probe_aberrations_max_radial_order=fit_probe_aberrations_max_radial_order, fit_probe_aberrations_remove_initial=fit_probe_aberrations_remove_initial, fit_probe_aberrations_using_scikit_image=fit_probe_aberrations_using_scikit_image, + num_probes_fit_aberrations=num_probes_fit_aberrations, fix_probe_aperture=fix_probe_aperture and not fix_probe, initial_probe_aperture=self._probe_initial_aperture, fix_positions=fix_positions, diff --git a/py4DSTEM/process/phase/mixedstate_ptychography.py b/py4DSTEM/process/phase/mixedstate_ptychography.py index 6fbb72b5d..a082e708f 100644 --- a/py4DSTEM/process/phase/mixedstate_ptychography.py +++ b/py4DSTEM/process/phase/mixedstate_ptychography.py @@ -620,6 +620,7 @@ def reconstruct( fit_probe_aberrations_max_radial_order: int = 4, fit_probe_aberrations_remove_initial: bool = False, fit_probe_aberrations_using_scikit_image: bool = True, + num_probes_fit_aberrations: int = np.inf, butterworth_filter: bool = True, q_lowpass: float = None, q_highpass: float = None, @@ -716,6 +717,8 @@ def reconstruct( If true, the necessary phase unwrapping is performed using scikit-image. This is more stable, but occasionally leads to a documented bug where the kernel hangs.. 
If false, a poisson-based solver is used for phase unwrapping. This won't hang, but tends to underestimate aberrations. + num_probes_fit_aberrations: int + The number of probes on which to apply the probe fitting butterworth_filter: bool, optional If True and q_lowpass or q_highpass is not None, object is smoothed using butterworth filtering q_lowpass: float @@ -923,6 +926,7 @@ def reconstruct( fit_probe_aberrations_max_radial_order=fit_probe_aberrations_max_radial_order, fit_probe_aberrations_remove_initial=fit_probe_aberrations_remove_initial, fit_probe_aberrations_using_scikit_image=fit_probe_aberrations_using_scikit_image, + num_probes_fit_aberrations=num_probes_fit_aberrations, fix_probe_aperture=fix_probe_aperture and not fix_probe, initial_probe_aperture=self._probe_initial_aperture, fix_positions=fix_positions, diff --git a/py4DSTEM/process/phase/ptychographic_constraints.py b/py4DSTEM/process/phase/ptychographic_constraints.py index 8a10b4df2..6da679067 100644 --- a/py4DSTEM/process/phase/ptychographic_constraints.py +++ b/py4DSTEM/process/phase/ptychographic_constraints.py @@ -1224,6 +1224,7 @@ def _probe_constraints( fit_probe_aberrations_max_radial_order, fit_probe_aberrations_remove_initial, fit_probe_aberrations_using_scikit_image, + num_probes_fit_aberrations, fix_probe_aperture, initial_probe_aperture, constrain_probe_fourier_amplitude, @@ -1243,7 +1244,9 @@ def _probe_constraints( # Fourier phase (aberrations) fitting if fit_probe_aberrations: - for probe_idx in range(self._num_probes): + if num_probes_fit_aberrations > self._num_probes: + num_probes_fit_aberrations = self._num_probes + for probe_idx in range(num_probes_fit_aberrations): current_probe[probe_idx] = self._probe_aberration_fitting_constraint( current_probe[probe_idx], fit_probe_aberrations_max_angular_order, From 3ca1869c3845e12c0afd34823187990c8fce42a0 Mon Sep 17 00:00:00 2001 From: Stephanie Ribet Date: Wed, 31 Jan 2024 15:21:10 -0800 Subject: [PATCH 10/50] flip 180 option for read 
arina --- py4DSTEM/io/filereaders/read_arina.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/py4DSTEM/io/filereaders/read_arina.py b/py4DSTEM/io/filereaders/read_arina.py index 832499d3f..045cd0e7c 100644 --- a/py4DSTEM/io/filereaders/read_arina.py +++ b/py4DSTEM/io/filereaders/read_arina.py @@ -12,6 +12,7 @@ def read_arina( binfactor: int = 1, dtype_bin: float = None, flatfield: np.ndarray = None, + flip_180: bool = False, ): """ File reader for arina 4D-STEM datasets @@ -28,6 +29,8 @@ def read_arina( other than uint16 flatfield (np.ndarray): flatfield for correction factors, converts data to float + flip_180 (bool): + if True, rotates on loading data by 180 degrees Returns: DataCube @@ -95,6 +98,9 @@ def read_arina( ) ) + if flip_180: + datacube.data = np.rot90(datacube.data, k=2, axes=(-1, -2)) + return datacube From 633fb017bc4cce82189250158ab42c8b4082439d Mon Sep 17 00:00:00 2001 From: Stephanie Ribet Date: Wed, 31 Jan 2024 21:08:53 -0800 Subject: [PATCH 11/50] ptycho positions offset --- .../phase/magnetic_ptychographic_tomography.py | 5 +++++ py4DSTEM/process/phase/magnetic_ptychography.py | 5 +++++ .../phase/mixedstate_multislice_ptychography.py | 5 +++++ py4DSTEM/process/phase/mixedstate_ptychography.py | 5 +++++ py4DSTEM/process/phase/multislice_ptychography.py | 5 +++++ py4DSTEM/process/phase/phase_base_class.py | 11 +++++++++++ py4DSTEM/process/phase/ptychographic_tomography.py | 5 +++++ py4DSTEM/process/phase/singleslice_ptychography.py | 5 +++++ 8 files changed, 46 insertions(+) diff --git a/py4DSTEM/process/phase/magnetic_ptychographic_tomography.py b/py4DSTEM/process/phase/magnetic_ptychographic_tomography.py index 7249a9064..13cd9a60f 100644 --- a/py4DSTEM/process/phase/magnetic_ptychographic_tomography.py +++ b/py4DSTEM/process/phase/magnetic_ptychographic_tomography.py @@ -112,6 +112,8 @@ class MagneticPtychographicTomography( initial_scan_positions: list of np.ndarray, optional Probe positions in Å for each diffraction intensity per 
tilt If None, initialized to a grid scan centered along tilt axis + positions_offset_ang: list of np.ndarray, optional + Offset of positions in A verbose: bool, optional If True, class methods will inherit this and print additional information object_type: str, optional @@ -155,6 +157,7 @@ def __init__( initial_object_guess: np.ndarray = None, initial_probe_guess: np.ndarray = None, initial_scan_positions: Sequence[np.ndarray] = None, + positions_offset_ang: Sequence[np.ndarray] = None, verbose: bool = True, device: str = "cpu", storage: str = None, @@ -199,6 +202,7 @@ def __init__( # Common Metadata self._vacuum_probe_intensity = vacuum_probe_intensity self._scan_positions = initial_scan_positions + self._positions_offset_ang = positions_offset_ang self._energy = energy self._semiangle_cutoff = semiangle_cutoff self._semiangle_cutoff_pixels = semiangle_cutoff_pixels @@ -484,6 +488,7 @@ def preprocess( self._scan_positions[index], self._positions_mask[index], self._object_padding_px, + self._positions_offset_ang[index], ) # handle semiangle specified in pixels diff --git a/py4DSTEM/process/phase/magnetic_ptychography.py b/py4DSTEM/process/phase/magnetic_ptychography.py index d718b1a9e..e56b9f5d4 100644 --- a/py4DSTEM/process/phase/magnetic_ptychography.py +++ b/py4DSTEM/process/phase/magnetic_ptychography.py @@ -101,6 +101,8 @@ class MagneticPtychography( initial_scan_positions: np.ndarray, optional Probe positions in Å for each diffraction intensity If None, initialized to a grid scan + positions_offset_ang: np.ndarray, optional + Offset of positions in A verbose: bool, optional If True, class methods will inherit this and print additional information device: str, optional @@ -136,6 +138,7 @@ def __init__( initial_object_guess: np.ndarray = None, initial_probe_guess: np.ndarray = None, initial_scan_positions: np.ndarray = None, + positions_offset_ang: np.ndarray = None, object_type: str = "complex", verbose: bool = True, device: str = "cpu", @@ -179,6 +182,7 @@ 
def __init__( # Common Metadata self._vacuum_probe_intensity = vacuum_probe_intensity self._scan_positions = initial_scan_positions + self._positions_offset_ang = positions_offset_ang self._energy = energy self._semiangle_cutoff = semiangle_cutoff self._semiangle_cutoff_pixels = semiangle_cutoff_pixels @@ -548,6 +552,7 @@ def preprocess( self._scan_positions[index], self._positions_mask[index], self._object_padding_px, + self._positions_offset_ang, ) # handle semiangle specified in pixels diff --git a/py4DSTEM/process/phase/mixedstate_multislice_ptychography.py b/py4DSTEM/process/phase/mixedstate_multislice_ptychography.py index be87e9496..3e9a58f6b 100644 --- a/py4DSTEM/process/phase/mixedstate_multislice_ptychography.py +++ b/py4DSTEM/process/phase/mixedstate_multislice_ptychography.py @@ -107,6 +107,8 @@ class MixedstateMultislicePtychography( initial_scan_positions: np.ndarray, optional Probe positions in Å for each diffraction intensity If None, initialized to a grid scan + positions_offset_ang: np.ndarray, optional + Offset of positions in A theta_x: float x tilt of propagator in mrad theta_y: float @@ -157,6 +159,7 @@ def __init__( initial_object_guess: np.ndarray = None, initial_probe_guess: np.ndarray = None, initial_scan_positions: np.ndarray = None, + positions_offset_ang: np.ndarray = None, theta_x: float = 0, theta_y: float = 0, middle_focus: bool = False, @@ -234,6 +237,7 @@ def __init__( # Common Metadata self._vacuum_probe_intensity = vacuum_probe_intensity self._scan_positions = initial_scan_positions + self._positions_offset_ang = positions_offset_ang self._energy = energy self._semiangle_cutoff = semiangle_cutoff self._semiangle_cutoff_pixels = semiangle_cutoff_pixels @@ -494,6 +498,7 @@ def preprocess( self._scan_positions, self._positions_mask, self._object_padding_px, + self._positions_offset_ang, ) # initialize object diff --git a/py4DSTEM/process/phase/mixedstate_ptychography.py b/py4DSTEM/process/phase/mixedstate_ptychography.py index 
a082e708f..817517879 100644 --- a/py4DSTEM/process/phase/mixedstate_ptychography.py +++ b/py4DSTEM/process/phase/mixedstate_ptychography.py @@ -96,6 +96,8 @@ class MixedstatePtychography( initial_scan_positions: np.ndarray, optional Probe positions in Å for each diffraction intensity If None, initialized to a grid scan + positions_offset_ang: np.ndarray, optional + Offset of positions in A positions_mask: np.ndarray, optional Boolean real space mask to select positions in datacube to skip for reconstruction verbose: bool, optional @@ -125,6 +127,7 @@ def __init__( initial_object_guess: np.ndarray = None, initial_probe_guess: np.ndarray = None, initial_scan_positions: np.ndarray = None, + positions_offset_ang: np.ndarray = None, object_type: str = "complex", positions_mask: np.ndarray = None, verbose: bool = True, @@ -184,6 +187,7 @@ def __init__( # Common Metadata self._vacuum_probe_intensity = vacuum_probe_intensity self._scan_positions = initial_scan_positions + self._positions_offset_ang = positions_offset_ang self._energy = energy self._semiangle_cutoff = semiangle_cutoff self._semiangle_cutoff_pixels = semiangle_cutoff_pixels @@ -436,6 +440,7 @@ def preprocess( self._scan_positions, self._positions_mask, self._object_padding_px, + self._positions_offset_ang, ) # initialize object diff --git a/py4DSTEM/process/phase/multislice_ptychography.py b/py4DSTEM/process/phase/multislice_ptychography.py index 03e636f57..79847938b 100644 --- a/py4DSTEM/process/phase/multislice_ptychography.py +++ b/py4DSTEM/process/phase/multislice_ptychography.py @@ -99,6 +99,8 @@ class MultislicePtychography( initial_scan_positions: np.ndarray, optional Probe positions in Å for each diffraction intensity If None, initialized to a grid scan + positions_offset_ang: np.ndarray, optional + Offset of positions in A theta_x: float x tilt of propagator in mrad theta_y: float @@ -147,6 +149,7 @@ def __init__( initial_object_guess: np.ndarray = None, initial_probe_guess: np.ndarray = None, 
initial_scan_positions: np.ndarray = None, + positions_offset_ang: np.ndarray = None, theta_x: float = None, theta_y: float = None, middle_focus: bool = False, @@ -209,6 +212,7 @@ def __init__( # Common Metadata self._vacuum_probe_intensity = vacuum_probe_intensity self._scan_positions = initial_scan_positions + self._positions_offset_ang = positions_offset_ang self._energy = energy self._semiangle_cutoff = semiangle_cutoff self._semiangle_cutoff_pixels = semiangle_cutoff_pixels @@ -468,6 +472,7 @@ def preprocess( self._scan_positions, self._positions_mask, self._object_padding_px, + self._positions_offset_ang, ) # initialize object diff --git a/py4DSTEM/process/phase/phase_base_class.py b/py4DSTEM/process/phase/phase_base_class.py index a67640d9b..3a36e58aa 100644 --- a/py4DSTEM/process/phase/phase_base_class.py +++ b/py4DSTEM/process/phase/phase_base_class.py @@ -1761,6 +1761,7 @@ def _calculate_scan_positions_in_pixels( positions: np.ndarray, positions_mask, object_padding_px, + positions_offset_ang, ): """ Method to compute the initial guess of scan positions in pixels. 
@@ -1775,6 +1776,8 @@ def _calculate_scan_positions_in_pixels( object_padding_px: Tuple[int,int], optional Pixel dimensions to pad object with If None, the padding is set to half the probe ROI dimensions + positions_offset_ang, np.ndarray, optional + Offset of positions in A Returns ------- @@ -1813,6 +1816,14 @@ def _calculate_scan_positions_in_pixels( y = (y - np.ptp(y) / 2) / sampling[1] x, y = np.meshgrid(x, y, indexing="ij") + if positions_offset_ang is not None: + if transpose: + x += positions_offset_ang[0] / sampling[1] + y += positions_offset_ang[1] / sampling[0] + else: + x += positions_offset_ang[0] / sampling[0] + y += positions_offset_ang[1] / sampling[1] + if positions_mask is not None: x = x[positions_mask] y = y[positions_mask] diff --git a/py4DSTEM/process/phase/ptychographic_tomography.py b/py4DSTEM/process/phase/ptychographic_tomography.py index b4a29fa5d..d9888a06b 100644 --- a/py4DSTEM/process/phase/ptychographic_tomography.py +++ b/py4DSTEM/process/phase/ptychographic_tomography.py @@ -106,6 +106,8 @@ class PtychographicTomography( initial_scan_positions: list of np.ndarray, optional Probe positions in Å for each diffraction intensity per tilt If None, initialized to a grid scan centered along tilt axis + positions_offset_ang: list of np.ndarray, optional + Offset of positions in A verbose: bool, optional If True, class methods will inherit this and print additional information object_type: str, optional @@ -149,6 +151,7 @@ def __init__( initial_object_guess: np.ndarray = None, initial_probe_guess: np.ndarray = None, initial_scan_positions: Sequence[np.ndarray] = None, + positions_offset_ang: Sequence[np.ndarray] = None, verbose: bool = True, device: str = "cpu", storage: str = None, @@ -193,6 +196,7 @@ def __init__( # Common Metadata self._vacuum_probe_intensity = vacuum_probe_intensity self._scan_positions = initial_scan_positions + self._positions_offset_ang = positions_offset_ang self._energy = energy self._semiangle_cutoff = 
semiangle_cutoff self._semiangle_cutoff_pixels = semiangle_cutoff_pixels @@ -483,6 +487,7 @@ def preprocess( self._scan_positions[index], self._positions_mask[index], self._object_padding_px, + self._positions_offset_ang[index], ) # handle semiangle specified in pixels diff --git a/py4DSTEM/process/phase/singleslice_ptychography.py b/py4DSTEM/process/phase/singleslice_ptychography.py index 71fe65cd7..56b2078bc 100644 --- a/py4DSTEM/process/phase/singleslice_ptychography.py +++ b/py4DSTEM/process/phase/singleslice_ptychography.py @@ -88,6 +88,8 @@ class SingleslicePtychography( initial_scan_positions: np.ndarray, optional Probe positions in Å for each diffraction intensity If None, initialized to a grid scan + positions_offset_ang: np.ndarray, optional + Offset of positions in A verbose: bool, optional If True, class methods will inherit this and print additional information object_type: str, optional @@ -122,6 +124,7 @@ def __init__( initial_object_guess: np.ndarray = None, initial_probe_guess: np.ndarray = None, initial_scan_positions: np.ndarray = None, + positions_offset_ang: np.ndarray = None, object_padding_px: Tuple[int, int] = None, object_type: str = "complex", positions_mask: np.ndarray = None, @@ -167,6 +170,7 @@ def __init__( # Common Metadata self._vacuum_probe_intensity = vacuum_probe_intensity self._scan_positions = initial_scan_positions + self._positions_offset_ang = positions_offset_ang self._energy = energy self._semiangle_cutoff = semiangle_cutoff self._semiangle_cutoff_pixels = semiangle_cutoff_pixels @@ -413,6 +417,7 @@ def preprocess( self._scan_positions, self._positions_mask, self._object_padding_px, + self._positions_offset_ang, ) # initialize object From 15aeca10c872fca888268141c183eb6d97fb9b58 Mon Sep 17 00:00:00 2001 From: Stephanie Ribet Date: Fri, 2 Feb 2024 14:40:17 -0800 Subject: [PATCH 12/50] remove 180 flip --- py4DSTEM/io/filereaders/read_arina.py | 6 ------ 1 file changed, 6 deletions(-) diff --git 
a/py4DSTEM/io/filereaders/read_arina.py b/py4DSTEM/io/filereaders/read_arina.py index 045cd0e7c..832499d3f 100644 --- a/py4DSTEM/io/filereaders/read_arina.py +++ b/py4DSTEM/io/filereaders/read_arina.py @@ -12,7 +12,6 @@ def read_arina( binfactor: int = 1, dtype_bin: float = None, flatfield: np.ndarray = None, - flip_180: bool = False, ): """ File reader for arina 4D-STEM datasets @@ -29,8 +28,6 @@ def read_arina( other than uint16 flatfield (np.ndarray): flatfield for correction factors, converts data to float - flip_180 (bool): - if True, rotates on loading data by 180 degrees Returns: DataCube @@ -98,9 +95,6 @@ def read_arina( ) ) - if flip_180: - datacube.data = np.rot90(datacube.data, k=2, axes=(-1, -2)) - return datacube From 92cd0f7765641c6258f59e38d369d97d39f72c3c Mon Sep 17 00:00:00 2001 From: Colin Date: Fri, 2 Feb 2024 15:09:02 -0800 Subject: [PATCH 13/50] tweaking amorphous fitting function --- py4DSTEM/process/polar/polar_fits.py | 71 ++++++++++++++++++++++++++-- 1 file changed, 67 insertions(+), 4 deletions(-) diff --git a/py4DSTEM/process/polar/polar_fits.py b/py4DSTEM/process/polar/polar_fits.py index 3e39c5584..81e6df721 100644 --- a/py4DSTEM/process/polar/polar_fits.py +++ b/py4DSTEM/process/polar/polar_fits.py @@ -3,15 +3,18 @@ # from scipy.optimize import leastsq from scipy.optimize import curve_fit +from emdfile import tqdmnd def fit_amorphous_ring( - im, + im = None, + datacube = None, center=None, radial_range=None, coefs=None, mask_dp=None, show_fit_mask=False, + fit_all_images = False, maxfev=None, verbose=False, plot_result=True, @@ -19,6 +22,7 @@ def fit_amorphous_ring( plot_int_scale=(-3, 3), figsize=(8, 8), return_all_coefs=True, + progress_bar = None, ): """ Fit an amorphous halo with a two-sided Gaussian model, plus a background @@ -28,6 +32,8 @@ def fit_amorphous_ring( -------- im: np.array 2D image array to perform fitting on + datacube: py4DSTEM.DataCube + datacube to perform the fitting on center: np.array (x,y) center coordinates 
for fitting mask. If not specified by the user, we will assume the center coordinate is (im.shape-1)/2. @@ -40,6 +46,8 @@ def fit_amorphous_ring( Dark field mask for fitting, in addition to the radial range specified above. show_fit_mask: bool Set to true to preview the fitting mask and initial guess for the ellipse params + fit_all_images: bool + Fit the elliptic parameters to all images maxfev: int Max number of fitting evaluations for curve_fit. verbose: bool @@ -63,6 +71,13 @@ def fit_amorphous_ring( 11 parameter elliptic fit coefficients """ + # If passing in a DataCube, use mean diffraction pattern for initial guess + if im is None: + im = datacube.get_dp_mean() + if progress_bar is None: + progress_bar = True + + # Default values if center is None: center = np.array(((im.shape[0] - 1) / 2, (im.shape[1] - 1) / 2)) @@ -193,7 +208,49 @@ def fit_amorphous_ring( )[0] coefs[4] = np.mod(coefs[4], 2 * np.pi) coefs[5:8] *= int_mean - # bounds=bounds + + # Perform the fit on each individual diffration pattern + if fit_all_images: + coefs_all = np.zeros(( + datacube.shape[0], + datacube.shape[1], + coefs.size)) + + for rx, ry in tqdmnd( + datacube.shape[0], + datacube.shape[1], + desc="Radial statistics", + unit=" probe positions", + disable=not progress_bar, + ): + vals = datacube.data[rx,ry][mask] + int_mean = np.mean(vals) + + if maxfev is None: + coefs_single = curve_fit( + amorphous_model, + basis, + vals / int_mean, + p0=coefs, + xtol=1e-8, + bounds=(lb, ub), + )[0] + else: + coefs_single = curve_fit( + amorphous_model, + basis, + vals / int_mean, + p0=coefs, + xtol=1e-8, + bounds=(lb, ub), + maxfev=maxfev, + )[0] + coefs_single[4] = np.mod(coefs_single[4], 2 * np.pi) + coefs_single[5:8] *= int_mean + + coefs_all[rx,ry] = coefs_single + + if verbose: print("x0 = " + str(np.round(coefs[0], 3)) + " px") @@ -214,9 +271,15 @@ def fit_amorphous_ring( # Return fit parameters if return_all_coefs: - return coefs + if fit_all_images: + return coefs_all + else: + return 
coefs else: - return coefs[:5] + if fit_all_images: + return coefs_all[:,:,:5] + else: + return coefs[:5] def plot_amorphous_ring( From 7a1a16728a0392f9267704eeda74fbe74d37b8bd Mon Sep 17 00:00:00 2001 From: Stephanie Ribet Date: Fri, 2 Feb 2024 15:52:31 -0800 Subject: [PATCH 14/50] median_filter_masked_pixels --- py4DSTEM/datacube/datacube.py | 20 +++++++++-- py4DSTEM/preprocess/preprocess.py | 55 +++++++++++++++++++++++++++++++ 2 files changed, 73 insertions(+), 2 deletions(-) diff --git a/py4DSTEM/datacube/datacube.py b/py4DSTEM/datacube/datacube.py index 4d87afdd5..77c125721 100644 --- a/py4DSTEM/datacube/datacube.py +++ b/py4DSTEM/datacube/datacube.py @@ -479,13 +479,29 @@ def filter_hot_pixels(self, thresh, ind_compare=1, return_mask=False): """ from py4DSTEM.preprocess import filter_hot_pixels - d = filter_hot_pixels( + datacube = filter_hot_pixels( self, thresh, ind_compare, return_mask, ) - return d + return datacube + + def median_filter_masked_pixels(self, mask, kernel_width: int = 3): + """ + This function fixes a datacube where the same pixels are consistently + bad. It requires a mask that identifies all the bad pixels in the dataset. + Then for each diffraction pattern, a median kernel is applied around each + bad pixel with the specified width. + """ + from py4DSTEM.preprocess import median_filter_masked_pixels + + datacube = median_filter_masked_pixels( + self, + mask, + kernel_width, + ) + return datacube # Probe diff --git a/py4DSTEM/preprocess/preprocess.py b/py4DSTEM/preprocess/preprocess.py index 9db7895d3..e9ed820fc 100644 --- a/py4DSTEM/preprocess/preprocess.py +++ b/py4DSTEM/preprocess/preprocess.py @@ -470,6 +470,61 @@ def filter_hot_pixels(datacube, thresh, ind_compare=1, return_mask=False): return datacube +def median_filter_masked_pixels(datacube, mask, kernel_width: int = 3): + """ + This function fixes a datacube where the same pixels are consistently + bad. It requires a mask that identifies all the bad pixels in the dataset. 
+ Then for each diffraction pattern, a median kernel is applied around each + bad pixel with the specified width. + + Parameters + ---------- + datacube: + Datacube to be filtered + mask: + a boolean mask that specifies the bad pixels in the datacube + kernel_width (optional): + specifies the width of the median kernel + + Returns + ---------- + filtered datacube + """ + if kernel_width % 2 == 0: + width_max = kernel_width // 2 + width_min = kernel_width // 2 + + else: + width_max = int(kernel_width / 2 + 0.5) + width_min = int(kernel_width / 2 - 0.5) + + num_bad_pixels_indicies = np.array(np.where(mask)) + for a0 in range(num_bad_pixels_indicies.shape[1]): + index_x = num_bad_pixels_indicies[0, a0] + index_y = num_bad_pixels_indicies[1, a0] + + x_min = index_x - width_min + y_min = index_y - width_min + + x_max = index_x + width_max + y_max = index_y + width_max + + if x_min < 0: + x_min = 0 + if y_min < 0: + y_min = 0 + + if x_max > datacube.Rshape[0]: + x_max = datacube.Rshape[0] + if y_max > datacube.Rshape[1]: + y_max = datacube.Rshape[1] + + datacube.data[:, :, index_x, index_y] = np.median( + datacube.data[:, :, x_min:x_max, y_min:y_max], axis=(2, 3) + ) + return datacube + + def datacube_diffraction_shift( datacube, xshifts, From 6f022f7d36c9e3acd6e0115261f0adb3b682b15b Mon Sep 17 00:00:00 2001 From: Georgios Varnavides Date: Tue, 6 Feb 2024 14:26:16 -0800 Subject: [PATCH 15/50] save polar aberrations instead --- py4DSTEM/process/phase/parallax.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/py4DSTEM/process/phase/parallax.py b/py4DSTEM/process/phase/parallax.py index 58181d812..b3be9fb29 100644 --- a/py4DSTEM/process/phase/parallax.py +++ b/py4DSTEM/process/phase/parallax.py @@ -155,11 +155,8 @@ def to_h5(self, group): if hasattr(self, "aberration_dict_cartesian"): self.metadata = Metadata( - name="aberrations_metadata", - data={ - v["aberration name"]: v["value [Ang]"] - for k, v in self.aberration_dict_cartesian.items() - }, + 
name="aberrations_polar_metadata", + data=self.aberration_dict_polar, ) self.metadata = Metadata( From be2cd20685c63a9d532c38f882d67a0d7f0f9457 Mon Sep 17 00:00:00 2001 From: Georgios Varnavides Date: Tue, 6 Feb 2024 14:37:08 -0800 Subject: [PATCH 16/50] read-write compatibility --- py4DSTEM/process/phase/dpc.py | 2 ++ py4DSTEM/process/phase/parallax.py | 2 ++ 2 files changed, 4 insertions(+) diff --git a/py4DSTEM/process/phase/dpc.py b/py4DSTEM/process/phase/dpc.py index 5a2210d59..a9468a002 100644 --- a/py4DSTEM/process/phase/dpc.py +++ b/py4DSTEM/process/phase/dpc.py @@ -193,6 +193,8 @@ def _get_constructor_args(cls, group): "name": instance_md["name"], "verbose": True, # for compatibility "device": "cpu", # for compatibility + "storage": "cpu", # for compatibility + "clear_fft_cache": True, # for compatibility } return kwargs diff --git a/py4DSTEM/process/phase/parallax.py b/py4DSTEM/process/phase/parallax.py index b3be9fb29..403f89eca 100644 --- a/py4DSTEM/process/phase/parallax.py +++ b/py4DSTEM/process/phase/parallax.py @@ -209,6 +209,8 @@ def _get_constructor_args(cls, group): "name": instance_md["name"], "verbose": True, # for compatibility "device": "cpu", # for compatibility + "storage": "cpu", # for compatibility + "clear_fft_cache": True, # for compatibility } return kwargs From 7ec01602f564bfb152ed40d26724738eb51ad2f6 Mon Sep 17 00:00:00 2001 From: Georgios Varnavides Date: Tue, 6 Feb 2024 14:39:28 -0800 Subject: [PATCH 17/50] uncertainty viz bug --- py4DSTEM/process/phase/ptychographic_visualizations.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/py4DSTEM/process/phase/ptychographic_visualizations.py b/py4DSTEM/process/phase/ptychographic_visualizations.py index 58dd224cf..51af028d4 100644 --- a/py4DSTEM/process/phase/ptychographic_visualizations.py +++ b/py4DSTEM/process/phase/ptychographic_visualizations.py @@ -758,7 +758,7 @@ def show_uncertainty_visualization( pix_output[np.logical_not(sub)] = 1 pix_output = 
pix_output[padding[0] : -padding[0], padding[1] : -padding[1]] pix_output, _, _ = return_scaled_histogram_ordering( - pix_output.get(), normalize=True + asnumpy(pix_output), normalize=True ) ## Visualization From 4f433f033e469534d55b20c2e15ce4577a734d1e Mon Sep 17 00:00:00 2001 From: Georgios Varnavides Date: Tue, 6 Feb 2024 15:07:36 -0800 Subject: [PATCH 18/50] adding force_com_measured functionality to ptycho --- .../phase/magnetic_ptychographic_tomography.py | 11 +++++++++++ py4DSTEM/process/phase/magnetic_ptychography.py | 13 ++++++++++++- .../phase/mixedstate_multislice_ptychography.py | 6 ++++++ .../process/phase/mixedstate_ptychography.py | 14 ++++++++++++-- .../process/phase/multislice_ptychography.py | 8 +++++++- py4DSTEM/process/phase/phase_base_class.py | 16 +++++++++++++++- .../process/phase/ptychographic_tomography.py | 11 +++++++++++ .../process/phase/singleslice_ptychography.py | 10 ++++++++-- 8 files changed, 82 insertions(+), 7 deletions(-) diff --git a/py4DSTEM/process/phase/magnetic_ptychographic_tomography.py b/py4DSTEM/process/phase/magnetic_ptychographic_tomography.py index 13cd9a60f..e1bae7141 100644 --- a/py4DSTEM/process/phase/magnetic_ptychographic_tomography.py +++ b/py4DSTEM/process/phase/magnetic_ptychographic_tomography.py @@ -231,6 +231,7 @@ def preprocess( diffraction_patterns_rotate_degrees: float = None, diffraction_patterns_transpose: bool = None, force_com_shifts: Sequence[float] = None, + force_com_measured: Sequence[np.ndarray] = None, vectorized_com_calculation: bool = True, progress_bar: bool = True, force_scan_sampling: float = None, @@ -279,6 +280,8 @@ def preprocess( Amplitudes come from diffraction patterns shifted with the CoM in the upper left corner for each probe unless shift is overwritten. One tuple per tilt. 
+ force_com_measured: tuple of ndarrays (CoMx measured, CoMy measured) + Force CoM measured shifts vectorized_com_calculation: bool, optional If True (default), the memory-intensive CoM calculation is vectorized force_scan_sampling: float, optional @@ -381,6 +384,9 @@ def preprocess( if force_com_shifts is None: force_com_shifts = [None] * self._num_measurements + if force_com_measured is None: + force_com_measured = [None] * self._num_measurements + self._rotation_best_rad = np.deg2rad(diffraction_patterns_rotate_degrees) self._rotation_best_transpose = diffraction_patterns_transpose @@ -398,6 +404,7 @@ def preprocess( self._vacuum_probe_intensity, self._dp_mask, force_com_shifts[index], + force_com_measured[index], ) = self._preprocess_datacube_and_vacuum_probe( self._datacube[index], diffraction_intensities_shape=self._diffraction_intensities_shape, @@ -406,6 +413,7 @@ def preprocess( vacuum_probe_intensity=self._vacuum_probe_intensity, dp_mask=self._dp_mask, com_shifts=force_com_shifts[index], + com_measured=force_com_measured[index], ) else: @@ -414,6 +422,7 @@ def preprocess( _, _, force_com_shifts[index], + force_com_measured[index], ) = self._preprocess_datacube_and_vacuum_probe( self._datacube[index], diffraction_intensities_shape=self._diffraction_intensities_shape, @@ -422,6 +431,7 @@ def preprocess( vacuum_probe_intensity=None, dp_mask=None, com_shifts=force_com_shifts[index], + com_measured=force_com_measured[index], ) # calibrations @@ -447,6 +457,7 @@ def preprocess( fit_function=fit_function, com_shifts=force_com_shifts[index], vectorized_calculation=vectorized_com_calculation, + com_measured=force_com_measured[index], ) # corner-center amplitudes diff --git a/py4DSTEM/process/phase/magnetic_ptychography.py b/py4DSTEM/process/phase/magnetic_ptychography.py index e56b9f5d4..fc7306cad 100644 --- a/py4DSTEM/process/phase/magnetic_ptychography.py +++ b/py4DSTEM/process/phase/magnetic_ptychography.py @@ -210,7 +210,8 @@ def preprocess( 
plot_probe_overlaps: bool = True, force_com_rotation: float = None, force_com_transpose: float = None, - force_com_shifts: float = None, + force_com_shifts: Sequence[np.ndarray] = None, + force_com_measured: Sequence[np.ndarray] = None, vectorized_com_calculation: bool = True, force_scan_sampling: float = None, force_angular_sampling: float = None, @@ -269,6 +270,8 @@ def preprocess( Amplitudes come from diffraction patterns shifted with the CoM in the upper left corner for each probe unless shift is overwritten. + force_com_measured: tuple of ndarrays (CoMx measured, CoMy measured) + Force CoM measured shifts vectorized_com_calculation: bool, optional If True (default), the memory-intensive CoM calculation is vectorized force_scan_sampling: float, optional @@ -415,6 +418,9 @@ def preprocess( if force_com_shifts is None: force_com_shifts = [None] * self._num_measurements + if force_com_measured is None: + force_com_measured = [None] * self._num_measurements + if self._scan_positions is None: self._scan_positions = [None] * self._num_measurements @@ -435,6 +441,7 @@ def preprocess( self._vacuum_probe_intensity, self._dp_mask, force_com_shifts[index], + force_com_measured[index], ) = self._preprocess_datacube_and_vacuum_probe( self._datacube[index], diffraction_intensities_shape=self._diffraction_intensities_shape, @@ -443,6 +450,7 @@ def preprocess( vacuum_probe_intensity=self._vacuum_probe_intensity, dp_mask=self._dp_mask, com_shifts=force_com_shifts[index], + com_measured=force_com_measured[index], ) else: @@ -451,6 +459,7 @@ def preprocess( _, _, force_com_shifts[index], + force_com_measured[index], ) = self._preprocess_datacube_and_vacuum_probe( self._datacube[index], diffraction_intensities_shape=self._diffraction_intensities_shape, @@ -459,6 +468,7 @@ def preprocess( vacuum_probe_intensity=None, dp_mask=None, com_shifts=force_com_shifts[index], + com_measured=force_com_measured[index], ) # calibrations @@ -484,6 +494,7 @@ def preprocess( 
fit_function=fit_function, com_shifts=force_com_shifts[index], vectorized_calculation=vectorized_com_calculation, + com_measured=force_com_measured[index], ) # estimate rotation / transpose using first measurement diff --git a/py4DSTEM/process/phase/mixedstate_multislice_ptychography.py b/py4DSTEM/process/phase/mixedstate_multislice_ptychography.py index 3e9a58f6b..f8edd8768 100644 --- a/py4DSTEM/process/phase/mixedstate_multislice_ptychography.py +++ b/py4DSTEM/process/phase/mixedstate_multislice_ptychography.py @@ -271,6 +271,7 @@ def preprocess( force_com_rotation: float = None, force_com_transpose: float = None, force_com_shifts: float = None, + force_com_measured: Sequence[np.ndarray] = None, vectorized_com_calculation: bool = True, force_scan_sampling: float = None, force_angular_sampling: float = None, @@ -331,6 +332,8 @@ def preprocess( Amplitudes come from diffraction patterns shifted with the CoM in the upper left corner for each probe unless shift is overwritten. + force_com_measured: tuple of ndarrays (CoMx measured, CoMy measured) + Force CoM measured shifts vectorized_com_calculation: bool, optional If True (default), the memory-intensive CoM calculation is vectorized force_scan_sampling: float, optional @@ -390,6 +393,7 @@ def preprocess( self._vacuum_probe_intensity, self._dp_mask, force_com_shifts, + force_com_measured, ) = self._preprocess_datacube_and_vacuum_probe( self._datacube, diffraction_intensities_shape=self._diffraction_intensities_shape, @@ -398,6 +402,7 @@ def preprocess( vacuum_probe_intensity=self._vacuum_probe_intensity, dp_mask=self._dp_mask, com_shifts=force_com_shifts, + com_measured=force_com_measured, ) # calibrations @@ -429,6 +434,7 @@ def preprocess( fit_function=fit_function, com_shifts=force_com_shifts, vectorized_calculation=vectorized_com_calculation, + com_measured=force_com_measured, ) # estimate rotation / transpose diff --git a/py4DSTEM/process/phase/mixedstate_ptychography.py 
b/py4DSTEM/process/phase/mixedstate_ptychography.py index 817517879..8e0802400 100644 --- a/py4DSTEM/process/phase/mixedstate_ptychography.py +++ b/py4DSTEM/process/phase/mixedstate_ptychography.py @@ -3,7 +3,7 @@ namely mixed-state ptychography. """ -from typing import Mapping, Tuple +from typing import Mapping, Sequence, Tuple, Union import matplotlib.pyplot as plt import numpy as np @@ -216,7 +216,9 @@ def preprocess( plot_probe_overlaps: bool = True, force_com_rotation: float = None, force_com_transpose: float = None, - force_com_shifts: float = None, + force_com_shifts: Union[Sequence[np.ndarray], Sequence[float]] = None, + force_com_measured: Sequence[np.ndarray] = None, + vectorized_com_calculation: bool = True, force_scan_sampling: float = None, force_angular_sampling: float = None, force_reciprocal_sampling: float = None, @@ -276,6 +278,10 @@ def preprocess( Amplitudes come from diffraction patterns shifted with the CoM in the upper left corner for each probe unless shift is overwritten. 
+ force_com_measured: tuple of ndarrays (CoMx measured, CoMy measured) + Force CoM measured shifts + vectorized_com_calculation: bool, optional + If True (default), the memory-intensive CoM calculation is vectorized force_scan_sampling: float, optional Override DataCube real space scan pixel size calibrations, in Angstrom force_angular_sampling: float, optional @@ -333,6 +339,7 @@ def preprocess( self._vacuum_probe_intensity, self._dp_mask, force_com_shifts, + force_com_measured, ) = self._preprocess_datacube_and_vacuum_probe( self._datacube, diffraction_intensities_shape=self._diffraction_intensities_shape, @@ -341,6 +348,7 @@ def preprocess( vacuum_probe_intensity=self._vacuum_probe_intensity, dp_mask=self._dp_mask, com_shifts=force_com_shifts, + com_measured=force_com_measured, ) # calibrations @@ -371,6 +379,8 @@ def preprocess( dp_mask=self._dp_mask, fit_function=fit_function, com_shifts=force_com_shifts, + vectorized_calculation=vectorized_com_calculation, + com_measured=force_com_measured, ) # estimate rotation / transpose diff --git a/py4DSTEM/process/phase/multislice_ptychography.py b/py4DSTEM/process/phase/multislice_ptychography.py index 79847938b..418af89ce 100644 --- a/py4DSTEM/process/phase/multislice_ptychography.py +++ b/py4DSTEM/process/phase/multislice_ptychography.py @@ -244,7 +244,8 @@ def preprocess( plot_probe_overlaps: bool = True, force_com_rotation: float = None, force_com_transpose: float = None, - force_com_shifts: float = None, + force_com_shifts: Union[Sequence[np.ndarray], Sequence[float]] = None, + force_com_measured: Sequence[np.ndarray] = None, vectorized_com_calculation: bool = True, force_scan_sampling: float = None, force_angular_sampling: float = None, @@ -305,6 +306,8 @@ def preprocess( Amplitudes come from diffraction patterns shifted with the CoM in the upper left corner for each probe unless shift is overwritten. 
+ force_com_measured: tuple of ndarrays (CoMx measured, CoMy measured) + Force CoM measured shifts vectorized_com_calculation: bool, optional If True (default), the memory-intensive CoM calculation is vectorized force_scan_sampling: float, optional @@ -364,6 +367,7 @@ def preprocess( self._vacuum_probe_intensity, self._dp_mask, force_com_shifts, + force_com_measured, ) = self._preprocess_datacube_and_vacuum_probe( self._datacube, diffraction_intensities_shape=self._diffraction_intensities_shape, @@ -372,6 +376,7 @@ def preprocess( vacuum_probe_intensity=self._vacuum_probe_intensity, dp_mask=self._dp_mask, com_shifts=force_com_shifts, + com_measured=force_com_measured, ) # calibrations @@ -403,6 +408,7 @@ def preprocess( fit_function=fit_function, com_shifts=force_com_shifts, vectorized_calculation=vectorized_com_calculation, + com_measured=force_com_measured, ) # estimate rotation / transpose diff --git a/py4DSTEM/process/phase/phase_base_class.py b/py4DSTEM/process/phase/phase_base_class.py index 3a36e58aa..bfc62a7d2 100644 --- a/py4DSTEM/process/phase/phase_base_class.py +++ b/py4DSTEM/process/phase/phase_base_class.py @@ -229,6 +229,7 @@ def _preprocess_datacube_and_vacuum_probe( vacuum_probe_intensity=None, dp_mask=None, com_shifts=None, + com_measured=None, ): """ Datacube preprocessing step, to set the reciprocal- and real-space sampling. 
@@ -279,6 +280,13 @@ def _preprocess_datacube_and_vacuum_probe( np.ones(datacube.Rshape) * com_shifts[1], ) + if com_measured is not None: + if np.isscalar(com_measured[0]): + com_measured = ( + np.ones(datacube.Rshape) * com_measured[0], + np.ones(datacube.Rshape) * com_measured[1], + ) + if diffraction_intensities_shape is not None: Qx, Qy = datacube.shape[-2:] Sx, Sy = diffraction_intensities_shape @@ -297,6 +305,12 @@ def _preprocess_datacube_and_vacuum_probe( com_shifts[1] * resampling_factor_x, ) + if com_measured is not None: + com_measured = ( + com_measured[0] * resampling_factor_x, + com_measured[1] * resampling_factor_x, + ) + if reshaping_method == "bin": bin_factor = int(1 / resampling_factor_x) if bin_factor < 1: @@ -390,7 +404,7 @@ def _preprocess_datacube_and_vacuum_probe( if dp_mask is not None: dp_mask = np.pad(dp_mask, pad_width=(pad_kx, pad_ky), mode="constant") - return datacube, vacuum_probe_intensity, dp_mask, com_shifts + return datacube, vacuum_probe_intensity, dp_mask, com_shifts, com_measured def _extract_intensities_and_calibrations_from_datacube( self, diff --git a/py4DSTEM/process/phase/ptychographic_tomography.py b/py4DSTEM/process/phase/ptychographic_tomography.py index d9888a06b..c1957dbf5 100644 --- a/py4DSTEM/process/phase/ptychographic_tomography.py +++ b/py4DSTEM/process/phase/ptychographic_tomography.py @@ -225,6 +225,7 @@ def preprocess( diffraction_patterns_rotate_degrees: float = None, diffraction_patterns_transpose: bool = None, force_com_shifts: Sequence[float] = None, + force_com_measured: Sequence[np.ndarray] = None, vectorized_com_calculation: bool = True, force_scan_sampling: float = None, force_angular_sampling: float = None, @@ -274,6 +275,8 @@ def preprocess( Amplitudes come from diffraction patterns shifted with the CoM in the upper left corner for each probe unless shift is overwritten. One tuple per tilt. 
+ force_com_measured: tuple of ndarrays (CoMx measured, CoMy measured) + Force CoM measured shifts vectorized_com_calculation: bool, optional If True (default), the memory-intensive CoM calculation is vectorized force_scan_sampling: float, optional @@ -380,6 +383,9 @@ def preprocess( if force_com_shifts is None: force_com_shifts = [None] * self._num_measurements + if force_com_measured is None: + force_com_measured = [None] * self._num_measurements + self._rotation_best_rad = np.deg2rad(diffraction_patterns_rotate_degrees) self._rotation_best_transpose = diffraction_patterns_transpose @@ -397,6 +403,7 @@ def preprocess( self._vacuum_probe_intensity, self._dp_mask, force_com_shifts[index], + force_com_measured[index], ) = self._preprocess_datacube_and_vacuum_probe( self._datacube[index], diffraction_intensities_shape=self._diffraction_intensities_shape, @@ -405,6 +412,7 @@ def preprocess( vacuum_probe_intensity=self._vacuum_probe_intensity, dp_mask=self._dp_mask, com_shifts=force_com_shifts[index], + com_measured=force_com_measured[index], ) else: @@ -413,6 +421,7 @@ def preprocess( _, _, force_com_shifts[index], + force_com_measured[index], ) = self._preprocess_datacube_and_vacuum_probe( self._datacube[index], diffraction_intensities_shape=self._diffraction_intensities_shape, @@ -421,6 +430,7 @@ def preprocess( vacuum_probe_intensity=None, dp_mask=None, com_shifts=force_com_shifts[index], + com_measured=force_com_measured[index], ) # calibrations @@ -446,6 +456,7 @@ def preprocess( fit_function=fit_function, com_shifts=force_com_shifts[index], vectorized_calculation=vectorized_com_calculation, + com_measured=force_com_measured[index], ) # corner-center amplitudes diff --git a/py4DSTEM/process/phase/singleslice_ptychography.py b/py4DSTEM/process/phase/singleslice_ptychography.py index 56b2078bc..5c56d79da 100644 --- a/py4DSTEM/process/phase/singleslice_ptychography.py +++ b/py4DSTEM/process/phase/singleslice_ptychography.py @@ -3,7 +3,7 @@ namely (single-slice) 
ptychography. """ -from typing import Mapping, Tuple +from typing import Mapping, Sequence, Tuple, Union import matplotlib.pyplot as plt import numpy as np @@ -198,7 +198,8 @@ def preprocess( plot_probe_overlaps: bool = True, force_com_rotation: float = None, force_com_transpose: float = None, - force_com_shifts: float = None, + force_com_shifts: Union[Sequence[np.ndarray], Sequence[float]] = None, + force_com_measured: Sequence[np.ndarray] = None, vectorized_com_calculation: bool = True, force_scan_sampling: float = None, force_angular_sampling: float = None, @@ -249,6 +250,8 @@ def preprocess( Amplitudes come from diffraction patterns shifted with the CoM in the upper left corner for each probe unless shift is overwritten. + force_com_measured: tuple of ndarrays (CoMx measured, CoMy measured) + Force CoM measured shifts vectorized_com_calculation: bool, optional If True (default), the memory-intensive CoM calculation is vectorized force_scan_sampling: float, optional @@ -309,6 +312,7 @@ def preprocess( self._vacuum_probe_intensity, self._dp_mask, force_com_shifts, + force_com_measured, ) = self._preprocess_datacube_and_vacuum_probe( self._datacube, diffraction_intensities_shape=self._diffraction_intensities_shape, @@ -317,6 +321,7 @@ def preprocess( vacuum_probe_intensity=self._vacuum_probe_intensity, dp_mask=self._dp_mask, com_shifts=force_com_shifts, + com_measured=force_com_measured, ) # calibrations @@ -348,6 +353,7 @@ def preprocess( fit_function=fit_function, com_shifts=force_com_shifts, vectorized_calculation=vectorized_com_calculation, + com_measured=force_com_measured, ) # estimate rotation / transpose From 314ee79bd87b35461a78100b77f09a007b7f194a Mon Sep 17 00:00:00 2001 From: Georgios Varnavides Date: Wed, 7 Feb 2024 09:56:18 -0800 Subject: [PATCH 19/50] clean up force_com_measured --- py4DSTEM/process/phase/phase_base_class.py | 35 ++++++++++++++-------- 1 file changed, 22 insertions(+), 13 deletions(-) diff --git 
a/py4DSTEM/process/phase/phase_base_class.py b/py4DSTEM/process/phase/phase_base_class.py index bfc62a7d2..2a8b653ed 100644 --- a/py4DSTEM/process/phase/phase_base_class.py +++ b/py4DSTEM/process/phase/phase_base_class.py @@ -646,7 +646,8 @@ def _calculate_intensities_center_of_mass( reciprocal_sampling = self._reciprocal_sampling if com_measured: - com_measured_x, com_measured_y = com_measured + com_measured_x = xp.asarray(com_measured[0], dtype=xp.float32) + com_measured_y = xp.asarray(com_measured[1], dtype=xp.float32) else: if dp_mask is not None: @@ -694,7 +695,7 @@ def _calculate_intensities_center_of_mass( for rx, ry in tqdmnd( sx, sy, - desc="Fitting center of mass", + desc="Calculating center of mass", unit="probe position", disable=not self._verbose, ): @@ -710,19 +711,27 @@ def _calculate_intensities_center_of_mass( ) if com_shifts is None: - com_measured_x_np = asnumpy(com_measured_x) - com_measured_y_np = asnumpy(com_measured_y) - finite_mask = np.isfinite(com_measured_x_np) - - com_shifts = fit_origin( - (com_measured_x_np, com_measured_y_np), - fitfunction=fit_function, - mask=finite_mask, - ) + if fit_function is not None: + com_measured_x_np = asnumpy(com_measured_x) + com_measured_y_np = asnumpy(com_measured_y) + finite_mask = np.isfinite(com_measured_x_np) + + com_shifts = fit_origin( + (com_measured_x_np, com_measured_y_np), + fitfunction=fit_function, + mask=finite_mask, + ) + + com_fitted_x = xp.asarray(com_shifts[0], dtype=xp.float32) + com_fitted_y = xp.asarray(com_shifts[1], dtype=xp.float32) + else: + com_fitted_x = xp.asarray(com_measured_x, dtype=xp.float32) + com_fitted_y = xp.asarray(com_measured_y, dtype=xp.float32) + else: + com_fitted_x = xp.asarray(com_shifts[0], dtype=xp.float32) + com_fitted_y = xp.asarray(com_shifts[1], dtype=xp.float32) # Fit function to center of mass - com_fitted_x = xp.asarray(com_shifts[0], dtype=xp.float32) - com_fitted_y = xp.asarray(com_shifts[1], dtype=xp.float32) # fix CoM units com_normalized_x = ( 
From dde1edabf58b51a15ee6cfb0c344dc5e73b4ccc5 Mon Sep 17 00:00:00 2001 From: Georgios Varnavides Date: Wed, 7 Feb 2024 13:05:27 -0800 Subject: [PATCH 20/50] adding DP normalization progress bar --- py4DSTEM/process/phase/phase_base_class.py | 41 ++++++++++++---------- 1 file changed, 22 insertions(+), 19 deletions(-) diff --git a/py4DSTEM/process/phase/phase_base_class.py b/py4DSTEM/process/phase/phase_base_class.py index 2a8b653ed..8be11112f 100644 --- a/py4DSTEM/process/phase/phase_base_class.py +++ b/py4DSTEM/process/phase/phase_base_class.py @@ -1407,27 +1407,30 @@ def _normalize_diffraction_intensities( ) counter = 0 - for rx in range(diffraction_intensities.shape[0]): - for ry in range(diffraction_intensities.shape[1]): - if positions_mask is not None: - if not positions_mask[rx, ry]: - continue - intensities = get_shifted_ar( - diffraction_intensities[rx, ry], - -com_fitted_x[rx, ry], - -com_fitted_y[rx, ry], - bilinear=True, - device="cpu", - ) + for rx, ry in tqdmnd( + diffraction_intensities.shape[0], + diffraction_intensities.shape[1], + desc="Normalizing amplitudes", + unit="probe position", + disable=not self._verbose, + ): + if positions_mask is not None: + if not positions_mask[rx, ry]: + continue + intensities = get_shifted_ar( + diffraction_intensities[rx, ry], + -com_fitted_x[rx, ry], + -com_fitted_y[rx, ry], + bilinear=True, + device="cpu", + ) - if crop_patterns: - intensities = intensities[crop_mask].reshape( - region_of_interest_shape - ) + if crop_patterns: + intensities = intensities[crop_mask].reshape(region_of_interest_shape) - mean_intensity += np.sum(intensities) - amplitudes[counter] = np.sqrt(np.maximum(intensities, 0)) - counter += 1 + mean_intensity += np.sum(intensities) + amplitudes[counter] = np.sqrt(np.maximum(intensities, 0)) + counter += 1 mean_intensity /= amplitudes.shape[0] From 4cca38af065cd64596b5fc5e286b2786291d018b Mon Sep 17 00:00:00 2001 From: Georgios Varnavides Date: Wed, 7 Feb 2024 13:37:31 -0800 Subject: [PATCH 
21/50] moving fov_mask calc to as needed --- .../magnetic_ptychographic_tomography.py | 88 +++++-------------- .../process/phase/magnetic_ptychography.py | 54 ++++++++---- .../mixedstate_multislice_ptychography.py | 36 ++++---- .../process/phase/mixedstate_ptychography.py | 36 ++++---- .../process/phase/multislice_ptychography.py | 36 ++++---- .../process/phase/ptychographic_tomography.py | 88 +++++-------------- .../process/phase/singleslice_ptychography.py | 37 ++++---- 7 files changed, 158 insertions(+), 217 deletions(-) diff --git a/py4DSTEM/process/phase/magnetic_ptychographic_tomography.py b/py4DSTEM/process/phase/magnetic_ptychographic_tomography.py index e1bae7141..8ff2b086f 100644 --- a/py4DSTEM/process/phase/magnetic_ptychographic_tomography.py +++ b/py4DSTEM/process/phase/magnetic_ptychographic_tomography.py @@ -237,7 +237,7 @@ def preprocess( force_scan_sampling: float = None, force_angular_sampling: float = None, force_reciprocal_sampling: float = None, - object_fov_mask: np.ndarray = None, + object_fov_mask: np.ndarray = True, crop_patterns: bool = False, device: str = None, clear_fft_cache: bool = None, @@ -387,9 +387,17 @@ def preprocess( if force_com_measured is None: force_com_measured = [None] * self._num_measurements + if self._positions_offset_ang is None: + self._positions_offset_ang = [None] * self._num_measurements + self._rotation_best_rad = np.deg2rad(diffraction_patterns_rotate_degrees) self._rotation_best_transpose = diffraction_patterns_transpose + if progress_bar: + # turn off verbosity to play nice with tqdm + verbose = self._verbose + self._verbose = False + # loop over DPs for preprocessing for index in tqdmnd( self._num_measurements, @@ -502,6 +510,10 @@ def preprocess( self._positions_offset_ang[index], ) + if progress_bar: + # reset verbosity + self._verbose = verbose + # handle semiangle specified in pixels if self._semiangle_cutoff_pixels: self._semiangle_cutoff = ( @@ -595,65 +607,16 @@ def preprocess( self._slice_thicknesses, 
) - # overlaps - if max_batch_size is None: - max_batch_size = self._num_diffraction_patterns - - if object_fov_mask is None: - probe_overlap_3D = xp.zeros_like(self._object[0]) - old_rot_matrix = np.eye(3) # identity - - for index in range(self._num_measurements): - idx_start = self._cum_probes_per_measurement[index] - idx_end = self._cum_probes_per_measurement[index + 1] - - rot_matrix = self._tilt_orientation_matrices[index] - - probe_overlap_3D = self._rotate_zxy_volume( - probe_overlap_3D, - rot_matrix @ old_rot_matrix.T, - ) - - probe_overlap = xp.zeros(self._object_shape, dtype=xp.float32) - - num_diffraction_patterns = idx_end - idx_start - shuffled_indices = np.arange(idx_start, idx_end) - - for start, end in generate_batches( - num_diffraction_patterns, max_batch=max_batch_size - ): - # batch indices - batch_indices = shuffled_indices[start:end] - positions_px = self._positions_px_all[batch_indices] - positions_px_fractional = positions_px - xp_storage.round( - positions_px - ) - - shifted_probes = fft_shift( - self._probes_all[index], positions_px_fractional, xp - ) - probe_overlap += self._sum_overlapping_patches_bincounts( - xp.abs(shifted_probes) ** 2, positions_px - ) - - del shifted_probes - - probe_overlap_3D += probe_overlap[None] - old_rot_matrix = rot_matrix - - probe_overlap_3D = self._rotate_zxy_volume( - probe_overlap_3D, - old_rot_matrix.T, - ) - - gaussian_filter = self._scipy.ndimage.gaussian_filter - probe_overlap_3D_blurred = gaussian_filter(probe_overlap_3D, 1.0) - self._object_fov_mask = asnumpy( - probe_overlap_3D_blurred > 0.25 * probe_overlap_3D_blurred.max() - ) - + if object_fov_mask is not True: + raise NotImplementedError() else: - self._object_fov_mask = np.asarray(object_fov_mask) + self._object_fov_mask = np.full(self._object_shape, True) + self._object_fov_mask_inverse = np.invert(self._object_fov_mask) + + # plot probe overlaps + if plot_probe_overlaps: + if max_batch_size is None: + max_batch_size = 
self._num_diffraction_patterns probe_overlap = xp.zeros(self._object_shape, dtype=xp.float32) @@ -672,13 +635,8 @@ def preprocess( ) del shifted_probes + probe_overlap = asnumpy(probe_overlap) - self._object_fov_mask_inverse = np.invert(self._object_fov_mask) - - probe_overlap = asnumpy(probe_overlap) - - # plot probe overlaps - if plot_probe_overlaps: figsize = kwargs.pop("figsize", (13, 4)) chroma_boost = kwargs.pop("chroma_boost", 1) power = kwargs.pop("power", 2) diff --git a/py4DSTEM/process/phase/magnetic_ptychography.py b/py4DSTEM/process/phase/magnetic_ptychography.py index fc7306cad..1be1f7b7f 100644 --- a/py4DSTEM/process/phase/magnetic_ptychography.py +++ b/py4DSTEM/process/phase/magnetic_ptychography.py @@ -217,7 +217,7 @@ def preprocess( force_angular_sampling: float = None, force_reciprocal_sampling: float = None, progress_bar: bool = True, - object_fov_mask: np.ndarray = None, + object_fov_mask: np.ndarray = True, crop_patterns: bool = False, device: str = None, clear_fft_cache: bool = None, @@ -424,9 +424,17 @@ def preprocess( if self._scan_positions is None: self._scan_positions = [None] * self._num_measurements + if self._positions_offset_ang is None: + self._positions_offset_ang = [None] * self._num_measurements + # Ensure plot_center_of_mass is not in kwargs kwargs.pop("plot_center_of_mass", None) + if progress_bar: + # turn off verbosity to play nice with tqdm + verbose = self._verbose + self._verbose = False + # loop over DPs for preprocessing for index in tqdmnd( self._num_measurements, @@ -563,9 +571,13 @@ def preprocess( self._scan_positions[index], self._positions_mask[index], self._object_padding_px, - self._positions_offset_ang, + self._positions_offset_ang[index], ) + if progress_bar: + # reset verbosity + self._verbose = verbose + # handle semiangle specified in pixels if self._semiangle_cutoff_pixels: self._semiangle_cutoff = ( @@ -642,25 +654,28 @@ def preprocess( device=self._device, )._evaluate_ctf() - # overlaps - if 
max_batch_size is None: - max_batch_size = self._num_diffraction_patterns + if object_fov_mask is None or plot_probe_overlaps: + # overlaps + if max_batch_size is None: + max_batch_size = self._num_diffraction_patterns - probe_overlap = xp.zeros(self._object_shape, dtype=xp.float32) + probe_overlap = xp.zeros(self._object_shape, dtype=xp.float32) - for start, end in generate_batches( - self._cum_probes_per_measurement[1], max_batch=max_batch_size - ): - # batch indices - positions_px = self._positions_px_all[start:end] - positions_px_fractional = positions_px - xp_storage.round(positions_px) + for start, end in generate_batches( + self._cum_probes_per_measurement[1], max_batch=max_batch_size + ): + # batch indices + positions_px = self._positions_px_all[start:end] + positions_px_fractional = positions_px - xp_storage.round(positions_px) - shifted_probes = fft_shift(self._probes_all[0], positions_px_fractional, xp) - probe_overlap += self._sum_overlapping_patches_bincounts( - xp.abs(shifted_probes) ** 2, positions_px - ) + shifted_probes = fft_shift( + self._probes_all[0], positions_px_fractional, xp + ) + probe_overlap += self._sum_overlapping_patches_bincounts( + xp.abs(shifted_probes) ** 2, positions_px + ) - del shifted_probes + del shifted_probes # initialize object_fov_mask if object_fov_mask is None: @@ -670,14 +685,15 @@ def preprocess( probe_overlap_blurred > 0.25 * probe_overlap_blurred.max() ) del probe_overlap_blurred + elif object_fov_mask is True: + self._object_fov_mask = np.full(self._object_shape, True) else: self._object_fov_mask = np.asarray(object_fov_mask) self._object_fov_mask_inverse = np.invert(self._object_fov_mask) - probe_overlap = asnumpy(probe_overlap) - # plot probe overlaps if plot_probe_overlaps: + probe_overlap = asnumpy(probe_overlap) figsize = kwargs.pop("figsize", (9, 4)) chroma_boost = kwargs.pop("chroma_boost", 1) power = kwargs.pop("power", 2) diff --git a/py4DSTEM/process/phase/mixedstate_multislice_ptychography.py 
b/py4DSTEM/process/phase/mixedstate_multislice_ptychography.py index f8edd8768..a5ddff454 100644 --- a/py4DSTEM/process/phase/mixedstate_multislice_ptychography.py +++ b/py4DSTEM/process/phase/mixedstate_multislice_ptychography.py @@ -565,25 +565,26 @@ def preprocess( self._theta_y, ) - # overlaps - if max_batch_size is None: - max_batch_size = self._num_diffraction_patterns + if object_fov_mask is None or plot_probe_overlaps: + # overlaps + if max_batch_size is None: + max_batch_size = self._num_diffraction_patterns - probe_overlap = xp.zeros(self._object_shape, dtype=xp.float32) + probe_overlap = xp.zeros(self._object_shape, dtype=xp.float32) - for start, end in generate_batches( - self._num_diffraction_patterns, max_batch=max_batch_size - ): - # batch indices - positions_px = self._positions_px[start:end] - positions_px_fractional = positions_px - xp_storage.round(positions_px) + for start, end in generate_batches( + self._num_diffraction_patterns, max_batch=max_batch_size + ): + # batch indices + positions_px = self._positions_px[start:end] + positions_px_fractional = positions_px - xp_storage.round(positions_px) - shifted_probes = fft_shift(self._probe[0], positions_px_fractional, xp) - probe_overlap += self._sum_overlapping_patches_bincounts( - xp.abs(shifted_probes) ** 2, positions_px - ) + shifted_probes = fft_shift(self._probe[0], positions_px_fractional, xp) + probe_overlap += self._sum_overlapping_patches_bincounts( + xp.abs(shifted_probes) ** 2, positions_px + ) - del shifted_probes + del shifted_probes if object_fov_mask is None: gaussian_filter = self._scipy.ndimage.gaussian_filter @@ -592,14 +593,15 @@ def preprocess( probe_overlap_blurred > 0.25 * probe_overlap_blurred.max() ) del probe_overlap_blurred + elif object_fov_mask is True: + self._object_fov_mask = np.full(self._object_shape, True) else: self._object_fov_mask = np.asarray(object_fov_mask) self._object_fov_mask_inverse = np.invert(self._object_fov_mask) - probe_overlap = 
asnumpy(probe_overlap) - # plot probe overlaps if plot_probe_overlaps: + probe_overlap = asnumpy(probe_overlap) figsize = kwargs.pop("figsize", (13, 4)) chroma_boost = kwargs.pop("chroma_boost", 1) power = kwargs.pop("power", 2) diff --git a/py4DSTEM/process/phase/mixedstate_ptychography.py b/py4DSTEM/process/phase/mixedstate_ptychography.py index 8e0802400..0825e18ca 100644 --- a/py4DSTEM/process/phase/mixedstate_ptychography.py +++ b/py4DSTEM/process/phase/mixedstate_ptychography.py @@ -500,25 +500,26 @@ def preprocess( self._probe_initial = self._probe.copy() self._probe_initial_aperture = xp.abs(xp.fft.fft2(self._probe)) - # overlaps - if max_batch_size is None: - max_batch_size = self._num_diffraction_patterns + if object_fov_mask is None or plot_probe_overlaps: + # overlaps + if max_batch_size is None: + max_batch_size = self._num_diffraction_patterns - probe_overlap = xp.zeros(self._object_shape, dtype=xp.float32) + probe_overlap = xp.zeros(self._object_shape, dtype=xp.float32) - for start, end in generate_batches( - self._num_diffraction_patterns, max_batch=max_batch_size - ): - # batch indices - positions_px = self._positions_px[start:end] - positions_px_fractional = positions_px - xp_storage.round(positions_px) + for start, end in generate_batches( + self._num_diffraction_patterns, max_batch=max_batch_size + ): + # batch indices + positions_px = self._positions_px[start:end] + positions_px_fractional = positions_px - xp_storage.round(positions_px) - shifted_probes = fft_shift(self._probe[0], positions_px_fractional, xp) - probe_overlap += self._sum_overlapping_patches_bincounts( - xp.abs(shifted_probes) ** 2, positions_px - ) + shifted_probes = fft_shift(self._probe[0], positions_px_fractional, xp) + probe_overlap += self._sum_overlapping_patches_bincounts( + xp.abs(shifted_probes) ** 2, positions_px + ) - del shifted_probes + del shifted_probes if object_fov_mask is None: gaussian_filter = self._scipy.ndimage.gaussian_filter @@ -527,14 +528,15 @@ def 
preprocess( probe_overlap_blurred > 0.25 * probe_overlap_blurred.max() ) del probe_overlap_blurred + elif object_fov_mask is True: + self._object_fov_mask = np.full(self._object_shape, True) else: self._object_fov_mask = np.asarray(object_fov_mask) self._object_fov_mask_inverse = np.invert(self._object_fov_mask) - probe_overlap = asnumpy(probe_overlap) - # plot probe overlaps if plot_probe_overlaps: + probe_overlap = asnumpy(probe_overlap) figsize = kwargs.pop("figsize", (4.5 * self._num_probes + 4, 4)) chroma_boost = kwargs.pop("chroma_boost", 1) power = kwargs.pop("power", 2) diff --git a/py4DSTEM/process/phase/multislice_ptychography.py b/py4DSTEM/process/phase/multislice_ptychography.py index 418af89ce..01c91de4b 100644 --- a/py4DSTEM/process/phase/multislice_ptychography.py +++ b/py4DSTEM/process/phase/multislice_ptychography.py @@ -539,25 +539,26 @@ def preprocess( self._theta_y, ) - # overlaps - if max_batch_size is None: - max_batch_size = self._num_diffraction_patterns + if object_fov_mask is None or plot_probe_overlaps: + # overlaps + if max_batch_size is None: + max_batch_size = self._num_diffraction_patterns - probe_overlap = xp.zeros(self._object_shape, dtype=xp.float32) + probe_overlap = xp.zeros(self._object_shape, dtype=xp.float32) - for start, end in generate_batches( - self._num_diffraction_patterns, max_batch=max_batch_size - ): - # batch indices - positions_px = self._positions_px[start:end] - positions_px_fractional = positions_px - xp_storage.round(positions_px) + for start, end in generate_batches( + self._num_diffraction_patterns, max_batch=max_batch_size + ): + # batch indices + positions_px = self._positions_px[start:end] + positions_px_fractional = positions_px - xp_storage.round(positions_px) - shifted_probes = fft_shift(self._probe, positions_px_fractional, xp) - probe_overlap += self._sum_overlapping_patches_bincounts( - xp.abs(shifted_probes) ** 2, positions_px - ) + shifted_probes = fft_shift(self._probe, positions_px_fractional, xp) 
+ probe_overlap += self._sum_overlapping_patches_bincounts( + xp.abs(shifted_probes) ** 2, positions_px + ) - del shifted_probes + del shifted_probes if object_fov_mask is None: gaussian_filter = self._scipy.ndimage.gaussian_filter @@ -566,14 +567,15 @@ def preprocess( probe_overlap_blurred > 0.25 * probe_overlap_blurred.max() ) del probe_overlap_blurred + elif object_fov_mask is True: + self._object_fov_mask = np.full(self._object_shape, True) else: self._object_fov_mask = np.asarray(object_fov_mask) self._object_fov_mask_inverse = np.invert(self._object_fov_mask) - probe_overlap = asnumpy(probe_overlap) - # plot probe overlaps if plot_probe_overlaps: + probe_overlap = asnumpy(probe_overlap) figsize = kwargs.pop("figsize", (13, 4)) chroma_boost = kwargs.pop("chroma_boost", 1) power = kwargs.pop("power", 2) diff --git a/py4DSTEM/process/phase/ptychographic_tomography.py b/py4DSTEM/process/phase/ptychographic_tomography.py index c1957dbf5..380c0bd20 100644 --- a/py4DSTEM/process/phase/ptychographic_tomography.py +++ b/py4DSTEM/process/phase/ptychographic_tomography.py @@ -231,7 +231,7 @@ def preprocess( force_angular_sampling: float = None, force_reciprocal_sampling: float = None, progress_bar: bool = True, - object_fov_mask: np.ndarray = None, + object_fov_mask: np.ndarray = True, crop_patterns: bool = False, main_tilt_axis: str = "vertical", device: str = None, @@ -386,9 +386,17 @@ def preprocess( if force_com_measured is None: force_com_measured = [None] * self._num_measurements + if self._positions_offset_ang is None: + self._positions_offset_ang = [None] * self._num_measurements + self._rotation_best_rad = np.deg2rad(diffraction_patterns_rotate_degrees) self._rotation_best_transpose = diffraction_patterns_transpose + if progress_bar: + # turn off verbosity to play nice with tqdm + verbose = self._verbose + self._verbose = False + # loop over DPs for preprocessing for index in tqdmnd( self._num_measurements, @@ -501,6 +509,10 @@ def preprocess( 
self._positions_offset_ang[index], ) + if progress_bar: + # reset verbosity + self._verbose = verbose + # handle semiangle specified in pixels if self._semiangle_cutoff_pixels: self._semiangle_cutoff = ( @@ -594,65 +606,16 @@ def preprocess( self._slice_thicknesses, ) - # overlaps - if max_batch_size is None: - max_batch_size = self._num_diffraction_patterns - - if object_fov_mask is None: - probe_overlap_3D = xp.zeros_like(self._object) - old_rot_matrix = np.eye(3) # identity - - for index in range(self._num_measurements): - idx_start = self._cum_probes_per_measurement[index] - idx_end = self._cum_probes_per_measurement[index + 1] - - rot_matrix = self._tilt_orientation_matrices[index] - - probe_overlap_3D = self._rotate_zxy_volume( - probe_overlap_3D, - rot_matrix @ old_rot_matrix.T, - ) - - probe_overlap = xp.zeros(self._object_shape, dtype=xp.float32) - - num_diffraction_patterns = idx_end - idx_start - shuffled_indices = np.arange(idx_start, idx_end) - - for start, end in generate_batches( - num_diffraction_patterns, max_batch=max_batch_size - ): - # batch indices - batch_indices = shuffled_indices[start:end] - positions_px = self._positions_px_all[batch_indices] - positions_px_fractional = positions_px - xp_storage.round( - positions_px - ) - - shifted_probes = fft_shift( - self._probes_all[index], positions_px_fractional, xp - ) - probe_overlap += self._sum_overlapping_patches_bincounts( - xp.abs(shifted_probes) ** 2, positions_px - ) - - del shifted_probes - - probe_overlap_3D += probe_overlap[None] - old_rot_matrix = rot_matrix - - probe_overlap_3D = self._rotate_zxy_volume( - probe_overlap_3D, - old_rot_matrix.T, - ) - - gaussian_filter = self._scipy.ndimage.gaussian_filter - probe_overlap_3D_blurred = gaussian_filter(probe_overlap_3D, 1.0) - self._object_fov_mask = asnumpy( - probe_overlap_3D_blurred > 0.25 * probe_overlap_3D_blurred.max() - ) - + if object_fov_mask is not True: + raise NotImplementedError() else: - self._object_fov_mask = 
np.asarray(object_fov_mask) + self._object_fov_mask = np.full(self._object_shape, True) + self._object_fov_mask_inverse = np.invert(self._object_fov_mask) + + # plot probe overlaps + if plot_probe_overlaps: + if max_batch_size is None: + max_batch_size = self._num_diffraction_patterns probe_overlap = xp.zeros(self._object_shape, dtype=xp.float32) @@ -671,13 +634,8 @@ def preprocess( ) del shifted_probes + probe_overlap = asnumpy(probe_overlap) - self._object_fov_mask_inverse = np.invert(self._object_fov_mask) - - probe_overlap = asnumpy(probe_overlap) - - # plot probe overlaps - if plot_probe_overlaps: figsize = kwargs.pop("figsize", (13, 4)) chroma_boost = kwargs.pop("chroma_boost", 1) power = kwargs.pop("power", 2) diff --git a/py4DSTEM/process/phase/singleslice_ptychography.py b/py4DSTEM/process/phase/singleslice_ptychography.py index 5c56d79da..8d8602320 100644 --- a/py4DSTEM/process/phase/singleslice_ptychography.py +++ b/py4DSTEM/process/phase/singleslice_ptychography.py @@ -473,25 +473,26 @@ def preprocess( self._probe_initial = self._probe.copy() self._probe_initial_aperture = xp.abs(xp.fft.fft2(self._probe)) - # overlaps - if max_batch_size is None: - max_batch_size = self._num_diffraction_patterns + if object_fov_mask is None or plot_probe_overlaps: + # overlaps + if max_batch_size is None: + max_batch_size = self._num_diffraction_patterns - probe_overlap = xp.zeros(self._object_shape, dtype=xp.float32) + probe_overlap = xp.zeros(self._object_shape, dtype=xp.float32) - for start, end in generate_batches( - self._num_diffraction_patterns, max_batch=max_batch_size - ): - # batch indices - positions_px = self._positions_px[start:end] - positions_px_fractional = positions_px - xp_storage.round(positions_px) + for start, end in generate_batches( + self._num_diffraction_patterns, max_batch=max_batch_size + ): + # batch indices + positions_px = self._positions_px[start:end] + positions_px_fractional = positions_px - xp_storage.round(positions_px) - 
shifted_probes = fft_shift(self._probe, positions_px_fractional, xp) - probe_overlap += self._sum_overlapping_patches_bincounts( - xp.abs(shifted_probes) ** 2, positions_px - ) + shifted_probes = fft_shift(self._probe, positions_px_fractional, xp) + probe_overlap += self._sum_overlapping_patches_bincounts( + xp.abs(shifted_probes) ** 2, positions_px + ) - del shifted_probes + del shifted_probes # initialize object_fov_mask if object_fov_mask is None: @@ -501,14 +502,16 @@ def preprocess( probe_overlap_blurred > 0.25 * probe_overlap_blurred.max() ) del probe_overlap_blurred + elif object_fov_mask is True: + self._object_fov_mask = np.full(self._object_shape, True) else: self._object_fov_mask = np.asarray(object_fov_mask) self._object_fov_mask_inverse = np.invert(self._object_fov_mask) - probe_overlap = asnumpy(probe_overlap) - # plot probe overlaps if plot_probe_overlaps: + probe_overlap = asnumpy(probe_overlap) + figsize = kwargs.pop("figsize", (9, 4)) chroma_boost = kwargs.pop("chroma_boost", 1) power = kwargs.pop("power", 2) From aaf957f7cfd3a883e9851ffb43512b5ffde6b260 Mon Sep 17 00:00:00 2001 From: Georgios Varnavides Date: Wed, 7 Feb 2024 16:35:56 -0800 Subject: [PATCH 22/50] adding detector_fourier_mask --- .../magnetic_ptychographic_tomography.py | 12 ++++ .../process/phase/magnetic_ptychography.py | 10 +++ .../mixedstate_multislice_ptychography.py | 11 +++ .../process/phase/mixedstate_ptychography.py | 11 +++ .../process/phase/multislice_ptychography.py | 11 +++ .../process/phase/ptychographic_methods.py | 67 ++++++++++++++++--- .../process/phase/ptychographic_tomography.py | 12 ++++ .../process/phase/singleslice_ptychography.py | 11 +++ 8 files changed, 134 insertions(+), 11 deletions(-) diff --git a/py4DSTEM/process/phase/magnetic_ptychographic_tomography.py b/py4DSTEM/process/phase/magnetic_ptychographic_tomography.py index 8ff2b086f..eed55de53 100644 --- a/py4DSTEM/process/phase/magnetic_ptychographic_tomography.py +++ 
b/py4DSTEM/process/phase/magnetic_ptychographic_tomography.py @@ -851,6 +851,7 @@ def reconstruct( object_positivity: bool = True, shrinkage_rad: float = 0.0, fix_potential_baseline: bool = True, + detector_fourier_mask: np.ndarray = None, tv_denoise: bool = True, tv_denoise_weights=None, tv_denoise_inner_iter=40, @@ -961,6 +962,11 @@ def reconstruct( if True perform collective tilt updates shrinkage_rad: float Phase shift in radians to be subtracted from the potential at each iteration + fix_potential_baseline: bool + If true, the potential mean outside the FOV is forced to zero at each iteration + detector_fourier_mask: np.ndarray + Corner-centered mask to apply at the detector-plane for zeroing-out unreliable gradients. + Useful when detector has artifacts such as dead-pixels. Usually binary. store_iterations: bool, optional If True, reconstructed objects and probes are stored at each iteration progress_bar: bool, optional @@ -1044,6 +1050,11 @@ def reconstruct( else: max_batch_size = self._num_diffraction_patterns + if detector_fourier_mask is None: + detector_fourier_mask = xp.ones(self._amplitudes[0].shape) + else: + detector_fourier_mask = xp.asarray(detector_fourier_mask) + # initialization self._reset_reconstruction(store_iterations, reset, use_projection_scheme) @@ -1153,6 +1164,7 @@ def reconstruct( positions_px_fractional, amplitudes_device, self._exit_waves, + detector_fourier_mask, use_projection_scheme, projection_a, projection_b, diff --git a/py4DSTEM/process/phase/magnetic_ptychography.py b/py4DSTEM/process/phase/magnetic_ptychography.py index 1be1f7b7f..d9739d782 100644 --- a/py4DSTEM/process/phase/magnetic_ptychography.py +++ b/py4DSTEM/process/phase/magnetic_ptychography.py @@ -1169,6 +1169,7 @@ def reconstruct( object_positivity: bool = True, shrinkage_rad: float = 0.0, fix_potential_baseline: bool = True, + detector_fourier_mask: np.ndarray = None, store_iterations: bool = False, collective_measurement_updates: bool = True, progress_bar: bool 
= True, @@ -1282,6 +1283,9 @@ def reconstruct( Phase shift in radians to be subtracted from the potential at each iteration fix_potential_baseline: bool If true, the potential mean outside the FOV is forced to zero at each iteration + detector_fourier_mask: np.ndarray + Corner-centered mask to apply at the detector-plane for zeroing-out unreliable gradients. + Useful when detector has artifacts such as dead-pixels. Usually binary. store_iterations: bool, optional If True, reconstructed objects and probes are stored at each iteration collective_measurement_updates: bool @@ -1371,6 +1375,11 @@ def reconstruct( else: max_batch_size = self._num_diffraction_patterns + if detector_fourier_mask is None: + detector_fourier_mask = xp.ones(self._amplitudes[0].shape) + else: + detector_fourier_mask = xp.asarray(detector_fourier_mask) + # initialization self._reset_reconstruction(store_iterations, reset, use_projection_scheme) @@ -1455,6 +1464,7 @@ def reconstruct( positions_px_fractional, amplitudes_device, self._exit_waves, + detector_fourier_mask, use_projection_scheme=use_projection_scheme, projection_a=projection_a, projection_b=projection_b, diff --git a/py4DSTEM/process/phase/mixedstate_multislice_ptychography.py b/py4DSTEM/process/phase/mixedstate_multislice_ptychography.py index a5ddff454..8154129ff 100644 --- a/py4DSTEM/process/phase/mixedstate_multislice_ptychography.py +++ b/py4DSTEM/process/phase/mixedstate_multislice_ptychography.py @@ -739,6 +739,7 @@ def reconstruct( object_positivity: bool = True, shrinkage_rad: float = 0.0, fix_potential_baseline: bool = True, + detector_fourier_mask: np.ndarray = None, pure_phase_object: bool = False, tv_denoise_chambolle: bool = True, tv_denoise_weight_chambolle=None, @@ -850,6 +851,9 @@ def reconstruct( Phase shift in radians to be subtracted from the potential at each iteration fix_potential_baseline: bool If true, the potential mean outside the FOV is forced to zero at each iteration + detector_fourier_mask: np.ndarray + 
Corner-centered mask to apply at the detector-plane for zeroing-out unreliable gradients. + Useful when detector has artifacts such as dead-pixels. Usually binary. pure_phase_object: bool, optional If True, object amplitude is set to unity tv_denoise_chambolle: bool @@ -897,6 +901,7 @@ def reconstruct( if object_type is not None: self._switch_object_type(object_type) + xp = self._xp xp_storage = self._xp_storage device = self._device asnumpy = self._asnumpy @@ -940,6 +945,11 @@ def reconstruct( else: max_batch_size = self._num_diffraction_patterns + if detector_fourier_mask is None: + detector_fourier_mask = xp.ones(self._amplitudes[0].shape) + else: + detector_fourier_mask = xp.asarray(detector_fourier_mask) + # initialization self._reset_reconstruction(store_iterations, reset) @@ -989,6 +999,7 @@ def reconstruct( positions_px_fractional, amplitudes_device, self._exit_waves, + detector_fourier_mask, use_projection_scheme, projection_a, projection_b, diff --git a/py4DSTEM/process/phase/mixedstate_ptychography.py b/py4DSTEM/process/phase/mixedstate_ptychography.py index 0825e18ca..9169d43f9 100644 --- a/py4DSTEM/process/phase/mixedstate_ptychography.py +++ b/py4DSTEM/process/phase/mixedstate_ptychography.py @@ -648,6 +648,7 @@ def reconstruct( object_positivity: bool = True, shrinkage_rad: float = 0.0, fix_potential_baseline: bool = True, + detector_fourier_mask: np.ndarray = None, store_iterations: bool = False, progress_bar: bool = True, reset: bool = None, @@ -756,6 +757,9 @@ def reconstruct( Phase shift in radians to be subtracted from the potential at each iteration fix_potential_baseline: bool If true, the potential mean outside the FOV is forced to zero at each iteration + detector_fourier_mask: np.ndarray + Corner-centered mask to apply at the detector-plane for zeroing-out unreliable gradients. + Useful when detector has artifacts such as dead-pixels. Usually binary. 
store_iterations: bool, optional If True, reconstructed objects and probes are stored at each iteration progress_bar: bool, optional @@ -791,6 +795,7 @@ def reconstruct( if object_type is not None: self._switch_object_type(object_type) + xp = self._xp xp_storage = self._xp_storage device = self._device asnumpy = self._asnumpy @@ -834,6 +839,11 @@ def reconstruct( else: max_batch_size = self._num_diffraction_patterns + if detector_fourier_mask is None: + detector_fourier_mask = xp.ones(self._amplitudes[0].shape) + else: + detector_fourier_mask = xp.asarray(detector_fourier_mask) + # initialization self._reset_reconstruction(store_iterations, reset) @@ -883,6 +893,7 @@ def reconstruct( positions_px_fractional, amplitudes_device, self._exit_waves, + detector_fourier_mask, use_projection_scheme, projection_a, projection_b, diff --git a/py4DSTEM/process/phase/multislice_ptychography.py b/py4DSTEM/process/phase/multislice_ptychography.py index 01c91de4b..c00efd270 100644 --- a/py4DSTEM/process/phase/multislice_ptychography.py +++ b/py4DSTEM/process/phase/multislice_ptychography.py @@ -711,6 +711,7 @@ def reconstruct( object_positivity: bool = True, shrinkage_rad: float = 0.0, fix_potential_baseline: bool = True, + detector_fourier_mask: np.ndarray = None, pure_phase_object: bool = False, tv_denoise_chambolle: bool = True, tv_denoise_weight_chambolle=None, @@ -822,6 +823,9 @@ def reconstruct( Phase shift in radians to be subtracted from the potential at each iteration fix_potential_baseline: bool If true, the potential mean outside the FOV is forced to zero at each iteration + detector_fourier_mask: np.ndarray + Corner-centered mask to apply at the detector-plane for zeroing-out unreliable gradients. + Useful when detector has artifacts such as dead-pixels. Usually binary. 
pure_phase_object: bool, optional If True, object amplitude is set to unity tv_denoise_chambolle: bool @@ -873,6 +877,7 @@ def reconstruct( if object_type is not None: self._switch_object_type(object_type) + xp = self._xp xp_storage = self._xp_storage device = self._device asnumpy = self._asnumpy @@ -916,6 +921,11 @@ def reconstruct( else: max_batch_size = self._num_diffraction_patterns + if detector_fourier_mask is None: + detector_fourier_mask = xp.ones(self._amplitudes[0].shape) + else: + detector_fourier_mask = xp.asarray(detector_fourier_mask) + # initialization self._reset_reconstruction(store_iterations, reset) @@ -965,6 +975,7 @@ def reconstruct( positions_px_fractional, amplitudes_device, self._exit_waves, + detector_fourier_mask, use_projection_scheme, projection_a, projection_b, diff --git a/py4DSTEM/process/phase/ptychographic_methods.py b/py4DSTEM/process/phase/ptychographic_methods.py index 6ab349f30..d1557760d 100644 --- a/py4DSTEM/process/phase/ptychographic_methods.py +++ b/py4DSTEM/process/phase/ptychographic_methods.py @@ -1480,7 +1480,7 @@ def _return_farfield_amplitudes(self, fourier_overlap): xp = self._xp return xp.abs(fourier_overlap) - def _gradient_descent_fourier_projection(self, amplitudes, overlap): + def _gradient_descent_fourier_projection(self, amplitudes, overlap, fourier_mask): """ Ptychographic fourier projection method for GD method. 
@@ -1490,6 +1490,9 @@ def _gradient_descent_fourier_projection(self, amplitudes, overlap): Normalized measured amplitudes overlap: np.ndarray object * probe overlap + fourier_mask: np.ndarray + Mask to apply at the detector-plane for zeroing-out unreliable gradients + Useful when detector has artifacts such as dead-pixels Returns -------- @@ -1507,14 +1510,16 @@ def _gradient_descent_fourier_projection(self, amplitudes, overlap): overlap, output_size=amplitudes.shape[-2:], xp=xp ) - fourier_overlap = xp.fft.fft2(overlap) + fourier_overlap = xp.fft.fft2(overlap) # * fourier_mask # GV votes to include farfield_amplitudes = self._return_farfield_amplitudes(fourier_overlap) error = xp.sum(xp.abs(amplitudes - farfield_amplitudes) ** 2) fourier_modified_overlap = amplitudes * xp.exp(1j * xp.angle(fourier_overlap)) - modified_overlap = xp.fft.ifft2(fourier_modified_overlap) - exit_waves = modified_overlap - overlap + fourier_modified_overlap = ( + fourier_modified_overlap - fourier_overlap + ) * fourier_mask + exit_waves = xp.fft.ifft2(fourier_modified_overlap) # resample back to region_of_interest_shape, note: this needs to happen in real-space if self._resample_exit_waves: @@ -1525,7 +1530,14 @@ def _gradient_descent_fourier_projection(self, amplitudes, overlap): return exit_waves, error def _projection_sets_fourier_projection( - self, amplitudes, overlap, exit_waves, projection_a, projection_b, projection_c + self, + amplitudes, + overlap, + exit_waves, + fourier_mask, + projection_a, + projection_b, + projection_c, ): """ Ptychographic fourier projection method for DM_AP and RAAR methods. 
@@ -1550,6 +1562,10 @@ def _projection_sets_fourier_projection( object * probe overlap exit_waves: np.ndarray previously estimated exit waves + fourier_mask: np.ndarray + Mask to apply at the detector-plane for zeroing-out unreliable gradients + Useful when detector has artifacts such as dead-pixels + Currently not implemented for projection-sets projection_a: float projection_b: float projection_c: float @@ -1562,6 +1578,9 @@ def _projection_sets_fourier_projection( Reconstruction error """ + if fourier_mask is not None: + raise NotImplementedError() + xp = self._xp projection_x = 1 - projection_a - projection_b projection_y = 1 - projection_c @@ -1610,6 +1629,7 @@ def _forward( positions_px_fractional, amplitudes, exit_waves, + fourier_mask, use_projection_scheme, projection_a, projection_b, @@ -1629,6 +1649,9 @@ def _forward( Normalized measured amplitudes exit_waves: np.ndarray previously estimated exit waves + fourier_mask: np.ndarray + Mask to apply at the detector-plane for zeroing-out unreliable gradients + Useful when detector has artifacts such as dead-pixels use_projection_scheme: bool, If True, use generalized projection update projection_a: float @@ -1664,6 +1687,7 @@ def _forward( amplitudes, overlap, exit_waves, + fourier_mask, projection_a, projection_b, projection_c, @@ -1671,7 +1695,9 @@ def _forward( else: exit_waves, error = self._gradient_descent_fourier_projection( - amplitudes, overlap + amplitudes, + overlap, + fourier_mask, ) return shifted_probes, object_patches, overlap, exit_waves, error @@ -2579,7 +2605,7 @@ def _return_farfield_amplitudes(self, fourier_overlap): xp = self._xp return xp.sqrt(xp.sum(xp.abs(fourier_overlap) ** 2, axis=1)) - def _gradient_descent_fourier_projection(self, amplitudes, overlap): + def _gradient_descent_fourier_projection(self, amplitudes, overlap, fourier_mask): """ Ptychographic fourier projection method for GD method. 
@@ -2589,6 +2615,9 @@ def _gradient_descent_fourier_projection(self, amplitudes, overlap): Normalized measured amplitudes overlap: np.ndarray object * probe overlap + fourier_mask: np.ndarray + Mask to apply at the detector-plane for zeroing-out unreliable gradients + Useful when detector has artifacts such as dead-pixels Returns -------- @@ -2606,7 +2635,7 @@ def _gradient_descent_fourier_projection(self, amplitudes, overlap): overlap, output_size=amplitudes.shape[-2:], xp=xp ) - fourier_overlap = xp.fft.fft2(overlap) + fourier_overlap = xp.fft.fft2(overlap) # * fourier_mask # GV votes to include farfield_amplitudes = self._return_farfield_amplitudes(fourier_overlap) error = xp.sum(xp.abs(amplitudes - farfield_amplitudes) ** 2) @@ -2614,9 +2643,11 @@ def _gradient_descent_fourier_projection(self, amplitudes, overlap): amplitude_modification = amplitudes / farfield_amplitudes fourier_modified_overlap = amplitude_modification[:, None] * fourier_overlap - modified_overlap = xp.fft.ifft2(fourier_modified_overlap) - exit_waves = modified_overlap - overlap + fourier_modified_overlap = ( + fourier_modified_overlap - fourier_overlap + ) * fourier_mask + exit_waves = xp.fft.ifft2(fourier_modified_overlap) # resample back to region_of_interest_shape, note: this needs to happen in real-space if self._resample_exit_waves: @@ -2627,7 +2658,14 @@ def _gradient_descent_fourier_projection(self, amplitudes, overlap): return exit_waves, error def _projection_sets_fourier_projection( - self, amplitudes, overlap, exit_waves, projection_a, projection_b, projection_c + self, + amplitudes, + overlap, + exit_waves, + fourier_mask, + projection_a, + projection_b, + projection_c, ): """ Ptychographic fourier projection method for DM_AP and RAAR methods. 
@@ -2652,6 +2690,10 @@ def _projection_sets_fourier_projection( object * probe overlap exit_waves: np.ndarray previously estimated exit waves + fourier_mask: np.ndarray + Mask to apply at the detector-plane for zeroing-out unreliable gradients + Useful when detector has artifacts such as dead-pixels + Currently not implemented for projection sets projection_a: float projection_b: float projection_c: float @@ -2664,6 +2706,9 @@ def _projection_sets_fourier_projection( Reconstruction error """ + if fourier_mask is not None: + raise NotImplementedError() + xp = self._xp projection_x = 1 - projection_a - projection_b projection_y = 1 - projection_c diff --git a/py4DSTEM/process/phase/ptychographic_tomography.py b/py4DSTEM/process/phase/ptychographic_tomography.py index 380c0bd20..6309658c2 100644 --- a/py4DSTEM/process/phase/ptychographic_tomography.py +++ b/py4DSTEM/process/phase/ptychographic_tomography.py @@ -771,6 +771,7 @@ def reconstruct( object_positivity: bool = True, shrinkage_rad: float = 0.0, fix_potential_baseline: bool = True, + detector_fourier_mask: np.ndarray = None, tv_denoise: bool = True, tv_denoise_weights: float = None, tv_denoise_inner_iter=40, @@ -879,6 +880,11 @@ def reconstruct( if True perform collective measurement updates (i.e. one per tilt) shrinkage_rad: float Phase shift in radians to be subtracted from the potential at each iteration + fix_potential_baseline: bool + If true, the potential mean outside the FOV is forced to zero at each iteration + detector_fourier_mask: np.ndarray + Corner-centered mask to apply at the detector-plane for zeroing-out unreliable gradients. + Useful when detector has artifacts such as dead-pixels. Usually binary. 
store_iterations: bool, optional If True, reconstructed objects and probes are stored at each iteration progress_bar: bool, optional @@ -951,6 +957,11 @@ def reconstruct( else: max_batch_size = self._num_diffraction_patterns + if detector_fourier_mask is None: + detector_fourier_mask = xp.ones(self._amplitudes[0].shape) + else: + detector_fourier_mask = xp.asarray(detector_fourier_mask) + # initialization self._reset_reconstruction(store_iterations, reset, use_projection_scheme) @@ -1045,6 +1056,7 @@ def reconstruct( positions_px_fractional, amplitudes_device, self._exit_waves, + detector_fourier_mask, use_projection_scheme, projection_a, projection_b, diff --git a/py4DSTEM/process/phase/singleslice_ptychography.py b/py4DSTEM/process/phase/singleslice_ptychography.py index 8d8602320..7504d8835 100644 --- a/py4DSTEM/process/phase/singleslice_ptychography.py +++ b/py4DSTEM/process/phase/singleslice_ptychography.py @@ -620,6 +620,7 @@ def reconstruct( object_positivity: bool = True, shrinkage_rad: float = 0.0, fix_potential_baseline: bool = True, + detector_fourier_mask: np.ndarray = None, store_iterations: bool = False, progress_bar: bool = True, reset: bool = None, @@ -726,6 +727,9 @@ def reconstruct( Phase shift in radians to be subtracted from the potential at each iteration fix_potential_baseline: bool If true, the potential mean outside the FOV is forced to zero at each iteration + detector_fourier_mask: np.ndarray + Corner-centered mask to apply at the detector-plane for zeroing-out unreliable gradients. + Useful when detector has artifacts such as dead-pixels. Usually binary. 
store_iterations: bool, optional If True, reconstructed objects and probes are stored at each iteration progress_bar: bool, optional @@ -761,6 +765,7 @@ def reconstruct( if object_type is not None: self._switch_object_type(object_type) + xp = self._xp xp_storage = self._xp_storage device = self._device asnumpy = self._asnumpy @@ -804,6 +809,11 @@ def reconstruct( else: max_batch_size = self._num_diffraction_patterns + if detector_fourier_mask is None: + detector_fourier_mask = xp.ones(self._amplitudes[0].shape) + else: + detector_fourier_mask = xp.asarray(detector_fourier_mask) + # initialization self._reset_reconstruction(store_iterations, reset) @@ -853,6 +863,7 @@ def reconstruct( positions_px_fractional, amplitudes_device, self._exit_waves, + detector_fourier_mask, use_projection_scheme, projection_a, projection_b, From 99c70adc7d6ef04500fbcac6c818a88b99faa5d2 Mon Sep 17 00:00:00 2001 From: Stephanie Ribet Date: Fri, 9 Feb 2024 18:06:08 -0800 Subject: [PATCH 23/50] bug fix for filtering --- py4DSTEM/preprocess/preprocess.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/py4DSTEM/preprocess/preprocess.py b/py4DSTEM/preprocess/preprocess.py index e9ed820fc..41ddd7fd4 100644 --- a/py4DSTEM/preprocess/preprocess.py +++ b/py4DSTEM/preprocess/preprocess.py @@ -514,10 +514,10 @@ def median_filter_masked_pixels(datacube, mask, kernel_width: int = 3): if y_min < 0: y_min = 0 - if x_max > datacube.Rshape[0]: - x_max = datacube.Rshape[0] - if y_max > datacube.Rshape[1]: - y_max = datacube.Rshape[1] + if x_max > datacube.Qshape[0]: + x_max = datacube.Qshape[0] + if y_max > datacube.Qshape[1]: + y_max = datacube.Qshape[1] datacube.data[:, :, index_x, index_y] = np.median( datacube.data[:, :, x_min:x_max, y_min:y_max], axis=(2, 3) From 2eae7d82dc8772f43ad0adf82f7478ff008570d4 Mon Sep 17 00:00:00 2001 From: Colin Ophus Date: Sun, 25 Feb 2024 18:00:28 -0800 Subject: [PATCH 24/50] Friedel origin finder, various FEM tools (#607) * bug fix * Revert "bug 
fix" This reverts commit cbce25cd754f1af68c6aded6bfe6c329425fcbae. * num_probes_fit_aberrations * flip 180 option for read arina * ptycho positions offset * remove 180 flip * median_filter_masked_pixels * save polar aberrations instead * read-write compatibility * uncertainty viz bug * adding force_com_measured functionality to ptycho * clean up force_com_measured * adding DP normalization progress bar * moving fov_mask calc to as needed * adding detector_fourier_mask * Auto center finding works! * Adding center finding without a mask * Adding plot range to show_origin_fit, fixing bug * Adding local mean / variance functions for FEM * Adding symmetry analysis and plotting * Fix divide by zero in correlation origin finding function * bug fix for filtering * adding initial commit for friedel correlation origin finder * Correlation working, but mask still buggy * Fixing the masked CC * Simplifying the expression * cleaning up - still need subpixel shifts * Update origin fitting visualization * adding device arg to upsample function * First attempt to add Friedel origin finding to ptycho GPU not yet working * Adding GPU implementation warning * parabolic subpixel fitting * minor updates * Going back to dev version of phase contrast * Changing np to xp for GPU compatibility * Fixing xp = np device options * Revering phase contrast options back to dev * Cleaning up code, fixing GPU support * black formatting * black updates * Adding annular symmetry plotting function * black formatting * cleaning typos and dead code and such * Update py4DSTEM/process/polar/polar_analysis.py Co-authored-by: Steve Zeltmann <37132012+sezelt@users.noreply.github.com> --------- Co-authored-by: Stephanie Ribet Co-authored-by: Georgios Varnavides Co-authored-by: gvarnavi Co-authored-by: SE Zeltmann Co-authored-by: Steve Zeltmann <37132012+sezelt@users.noreply.github.com> --- py4DSTEM/braggvectors/diskdetection_cuda.py | 8 +- py4DSTEM/datacube/datacube.py | 20 +- py4DSTEM/io/importfile.py | 2 +- 
.../io/legacy/legacy13/v13_emd_classes/io.py | 2 +- py4DSTEM/preprocess/preprocess.py | 55 ++++ py4DSTEM/process/calibration/origin.py | 165 +++++++--- .../diffraction/WK_scattering_factors.py | 4 +- py4DSTEM/process/diffraction/crystal_viz.py | 3 +- py4DSTEM/process/fit/fit.py | 3 +- .../magnetic_ptychographic_tomography.py | 48 +-- .../process/phase/magnetic_ptychography.py | 48 +-- .../mixedstate_multislice_ptychography.py | 19 +- .../process/phase/mixedstate_ptychography.py | 9 +- .../process/phase/multislice_ptychography.py | 9 +- py4DSTEM/process/phase/parallax.py | 27 +- py4DSTEM/process/phase/phase_base_class.py | 32 +- .../process/phase/ptychographic_methods.py | 4 +- .../process/phase/ptychographic_tomography.py | 48 +-- .../phase/ptychographic_visualizations.py | 8 +- .../process/phase/singleslice_ptychography.py | 9 +- py4DSTEM/process/phase/utils.py | 4 +- py4DSTEM/process/polar/polar_analysis.py | 291 +++++++++++++++++- py4DSTEM/process/polar/polar_datacube.py | 3 +- py4DSTEM/process/utils/utils.py | 18 +- py4DSTEM/process/wholepatternfit/wp_models.py | 16 +- py4DSTEM/visualize/vis_grid.py | 6 +- py4DSTEM/visualize/vis_special.py | 53 +++- 27 files changed, 701 insertions(+), 213 deletions(-) diff --git a/py4DSTEM/braggvectors/diskdetection_cuda.py b/py4DSTEM/braggvectors/diskdetection_cuda.py index 870b8303d..ddea4d9ad 100644 --- a/py4DSTEM/braggvectors/diskdetection_cuda.py +++ b/py4DSTEM/braggvectors/diskdetection_cuda.py @@ -156,9 +156,11 @@ def find_Bragg_disks_CUDA( patt_idx = batch_idx * batch_size + subbatch_idx rx, ry = np.unravel_index(patt_idx, (datacube.R_Nx, datacube.R_Ny)) batched_subcube[subbatch_idx, :, :] = cp.array( - datacube.data[rx, ry, :, :] - if filter_function is None - else filter_function(datacube.data[rx, ry, :, :]), + ( + datacube.data[rx, ry, :, :] + if filter_function is None + else filter_function(datacube.data[rx, ry, :, :]) + ), dtype=cp.float32, ) diff --git a/py4DSTEM/datacube/datacube.py 
b/py4DSTEM/datacube/datacube.py index 4d87afdd5..77c125721 100644 --- a/py4DSTEM/datacube/datacube.py +++ b/py4DSTEM/datacube/datacube.py @@ -479,13 +479,29 @@ def filter_hot_pixels(self, thresh, ind_compare=1, return_mask=False): """ from py4DSTEM.preprocess import filter_hot_pixels - d = filter_hot_pixels( + datacube = filter_hot_pixels( self, thresh, ind_compare, return_mask, ) - return d + return datacube + + def median_filter_masked_pixels(self, mask, kernel_width: int = 3): + """ + This function fixes a datacube where the same pixels are consistently + bad. It requires a mask that identifies all the bad pixels in the dataset. + Then for each diffraction pattern, a median kernel is applied around each + bad pixel with the specified width. + """ + from py4DSTEM.preprocess import median_filter_masked_pixels + + datacube = median_filter_masked_pixels( + self, + mask, + kernel_width, + ) + return datacube # Probe diff --git a/py4DSTEM/io/importfile.py b/py4DSTEM/io/importfile.py index 20a3759a2..ff3d1c37c 100644 --- a/py4DSTEM/io/importfile.py +++ b/py4DSTEM/io/importfile.py @@ -75,7 +75,7 @@ def import_file( "gatan_K2_bin", "mib", "arina", - "abTEM" + "abTEM", # "kitware_counted", ], "Error: filetype not recognized" diff --git a/py4DSTEM/io/legacy/legacy13/v13_emd_classes/io.py b/py4DSTEM/io/legacy/legacy13/v13_emd_classes/io.py index e1b7ab241..ddbea9005 100644 --- a/py4DSTEM/io/legacy/legacy13/v13_emd_classes/io.py +++ b/py4DSTEM/io/legacy/legacy13/v13_emd_classes/io.py @@ -361,7 +361,7 @@ def Array_to_h5(array, group): data = grp.create_dataset( "data", shape=array.data.shape, - data=array.data + data=array.data, # dtype = type(array.data) ) data.attrs.create( diff --git a/py4DSTEM/preprocess/preprocess.py b/py4DSTEM/preprocess/preprocess.py index 9db7895d3..41ddd7fd4 100644 --- a/py4DSTEM/preprocess/preprocess.py +++ b/py4DSTEM/preprocess/preprocess.py @@ -470,6 +470,61 @@ def filter_hot_pixels(datacube, thresh, ind_compare=1, return_mask=False): return 
datacube +def median_filter_masked_pixels(datacube, mask, kernel_width: int = 3): + """ + This function fixes a datacube where the same pixels are consistently + bad. It requires a mask that identifies all the bad pixels in the dataset. + Then for each diffraction pattern, a median kernel is applied around each + bad pixel with the specified width. + + Parameters + ---------- + datacube: + Datacube to be filtered + mask: + a boolean mask that specifies the bad pixels in the datacube + kernel_width (optional): + specifies the width of the median kernel + + Returns + ---------- + filtered datacube + """ + if kernel_width % 2 == 0: + width_max = kernel_width // 2 + width_min = kernel_width // 2 + + else: + width_max = int(kernel_width / 2 + 0.5) + width_min = int(kernel_width / 2 - 0.5) + + num_bad_pixels_indicies = np.array(np.where(mask)) + for a0 in range(num_bad_pixels_indicies.shape[1]): + index_x = num_bad_pixels_indicies[0, a0] + index_y = num_bad_pixels_indicies[1, a0] + + x_min = index_x - width_min + y_min = index_y - width_min + + x_max = index_x + width_max + y_max = index_y + width_max + + if x_min < 0: + x_min = 0 + if y_min < 0: + y_min = 0 + + if x_max > datacube.Qshape[0]: + x_max = datacube.Qshape[0] + if y_max > datacube.Qshape[1]: + y_max = datacube.Qshape[1] + + datacube.data[:, :, index_x, index_y] = np.median( + datacube.data[:, :, x_min:x_max, y_min:y_max], axis=(2, 3) + ) + return datacube + + def datacube_diffraction_shift( datacube, xshifts, diff --git a/py4DSTEM/process/calibration/origin.py b/py4DSTEM/process/calibration/origin.py index a0717e321..7f0c07a81 100644 --- a/py4DSTEM/process/calibration/origin.py +++ b/py4DSTEM/process/calibration/origin.py @@ -2,14 +2,27 @@ import functools import numpy as np +import matplotlib.pyplot as plt from scipy.ndimage import gaussian_filter from scipy.optimize import leastsq +import matplotlib.pyplot as plt from emdfile import tqdmnd, PointListArray from py4DSTEM.datacube import DataCube from 
py4DSTEM.process.calibration.probe import get_probe_size from py4DSTEM.process.fit import plane, parabola, bezier_two, fit_2D -from py4DSTEM.process.utils import get_CoM, add_to_2D_array_from_floats, get_maxima_2D +from py4DSTEM.process.utils import ( + get_CoM, + add_to_2D_array_from_floats, + get_maxima_2D, + upsampled_correlation, +) +from py4DSTEM.process.phase.utils import copy_to_device + +try: + import cupy as cp +except (ImportError, ModuleNotFoundError): + cp = np # @@ -309,58 +322,122 @@ def get_origin( return qx0, qy0, mask -def get_origin_single_dp_beamstop(DP: np.ndarray, mask: np.ndarray, **kwargs): +def get_origin_friedel( + datacube: DataCube, + mask=None, + upsample_factor=1, + device="cpu", + return_cpu=True, +): """ - Find the origin for a single diffraction pattern, assuming there is a beam stop. - - Args: - DP (np array): diffraction pattern - mask (np array): boolean mask which is False under the beamstop and True - in the diffraction pattern. One approach to generating this mask - is to apply a suitable threshold on the average diffraction pattern - and use binary opening/closing to remove and holes - - Returns: - qx0, qy0 (tuple) measured center position of diffraction pattern + Fit the origin for each diffraction pattern, with or without a beam stop. + The method we have developed here is a heavily modified version of masked + cross correlation, where we use Friedel symmetry of the diffraction pattern + to find the common center. + + More details about how the correlation step can be found in: + https://doi.org/10.1109/TIP.2011.2181402 + + Parameters + ---------- + datacube: (DataCube) + The 4D dataset. + mask: (np array, optional) + Boolean mask which is False under the beamstop and True + in the diffraction pattern. One approach to generating this mask + is to apply a suitable threshold on the average diffraction pattern + and use binary opening/closing to remove any holes. 
+ If no mask is provided, this method will likely not work with a beamstop. + upsample_factor: (int) + Upsample factor for subpixel fitting of the image shifts. + device: string + 'cpu' or 'gpu' to select device + return_cpu: bool + Return arrays on cpu. + + + Returns + ------- + qx0, qy0 + (tuple of np arrays) measured center position of each diffraction pattern """ - imCorr = np.real( - np.fft.ifft2( - np.fft.fft2(DP * mask) - * np.conj(np.fft.fft2(np.rot90(DP, 2) * np.rot90(mask, 2))) + # Select device + if device == "cpu": + xp = np + elif device == "gpu": + xp = cp + + # init measurement arrays + qx0 = xp.zeros(datacube.data.shape[:2]) + qy0 = xp.zeros_like(qx0) + + # pad the mask + if mask is not None: + mask = xp.asarray(mask).astype("float") + mask_pad = xp.pad( + mask, + ((0, datacube.data.shape[2]), (0, datacube.data.shape[3])), + constant_values=(1.0, 1.0), ) - ) + M = xp.fft.fft2(mask_pad) - xp, yp = np.unravel_index(np.argmax(imCorr), imCorr.shape) - - dx = ((xp + DP.shape[0] / 2) % DP.shape[0]) - DP.shape[0] / 2 - dy = ((yp + DP.shape[1] / 2) % DP.shape[1]) - DP.shape[1] / 2 - - return (DP.shape[0] + dx) / 2, (DP.shape[1] + dy) / 2 + # main loop over all probe positions + for rx, ry in tqdmnd(datacube.R_Nx, datacube.R_Ny): + if mask is None: + # pad image + im_xp = xp.asarray(datacube.data[rx, ry]) + im = xp.pad( + im_xp, + ((0, datacube.data.shape[2]), (0, datacube.data.shape[3])), + ) + G = xp.fft.fft2(im) + # Cross correlation of masked image with its inverse + cc = xp.real(xp.fft.ifft2(G**2)) -def get_origin_beamstop(datacube: DataCube, mask: np.ndarray, **kwargs): - """ - Find the origin for each diffraction pattern, assuming there is a beam stop. - - Args: - datacube (DataCube) - mask (np array): boolean mask which is False under the beamstop and True - in the diffraction pattern. 
One approach to generating this mask - is to apply a suitable threshold on the average diffraction pattern - and use binary opening/closing to remove any holes + else: + im_xp = xp.asarray(datacube.data[rx, ry, :, :]) + im = xp.pad( + im_xp, + ((0, datacube.data.shape[2]), (0, datacube.data.shape[3])), + ) - Returns: - qx0, qy0 (tuple of np arrays) measured center position of each diffraction pattern - """ + # Masked cross correlation of masked image with its inverse + term1 = xp.real(xp.fft.ifft2(xp.fft.fft2(im) ** 2) * xp.fft.ifft2(M**2)) + term2 = xp.real(xp.fft.ifft2(xp.fft.fft2(im**2) * M)) + term3 = xp.real(xp.fft.ifft2(xp.fft.fft2(im * mask_pad))) + cc = (term1 - term3) / (term2 - term3) - qx0 = np.zeros(datacube.data.shape[:2]) - qy0 = np.zeros_like(qx0) + # get correlation peak + x, y = xp.unravel_index(xp.argmax(cc), im.shape) - for rx, ry in tqdmnd(datacube.R_Nx, datacube.R_Ny): - x, y = get_origin_single_dp_beamstop(datacube.data[rx, ry, :, :], mask) + # half pixel upsampling / parabola subpixel fitting + dx = (cc[x + 1, y] - cc[x - 1, y]) / ( + 4.0 * cc[x, y] - 2.0 * cc[x + 1, y] - 2.0 * cc[x - 1, y] + ) + dy = (cc[x, y + 1] - cc[x, y - 1]) / ( + 4.0 * cc[x, y] - 2.0 * cc[x, y + 1] - 2.0 * cc[x, y - 1] + ) + # xp += np.round(dx*2.0)/2.0 + # yp += np.round(dy*2.0)/2.0 + x = x.astype("float") + dx + y = y.astype("float") + dy + + # upsample peak if needed + if upsample_factor > 1: + x, y = upsampled_correlation( + xp.fft.fft2(cc), + upsampleFactor=upsample_factor, + xyShift=xp.array((x, y)), + device=device, + ) - qx0[rx, ry] = x - qy0[rx, ry] = y + # Correlation peak, moved to image center shift + qx0[rx, ry] = (x / 2) % datacube.data.shape[2] + qy0[rx, ry] = (y / 2) % datacube.data.shape[3] - return qx0, qy0 + if return_cpu: + return copy_to_device(qx0), copy_to_device(qy0) + else: + return qx0, qy0 diff --git a/py4DSTEM/process/diffraction/WK_scattering_factors.py b/py4DSTEM/process/diffraction/WK_scattering_factors.py index 70110a977..eb964de96 
100644 --- a/py4DSTEM/process/diffraction/WK_scattering_factors.py +++ b/py4DSTEM/process/diffraction/WK_scattering_factors.py @@ -221,9 +221,7 @@ def RI1(BI, BJ, G): ri1[sub] = np.pi * (BI * np.log((BI + BJ) / BI) + BJ * np.log((BI + BJ) / BJ)) sub = np.logical_and(eps <= 0.1, G > 0.0) - temp = 0.5 * BI**2 * np.log(BI / (BI + BJ)) + 0.5 * BJ**2 * np.log( - BJ / (BI + BJ) - ) + temp = 0.5 * BI**2 * np.log(BI / (BI + BJ)) + 0.5 * BJ**2 * np.log(BJ / (BI + BJ)) temp += 0.75 * (BI**2 + BJ**2) - 0.25 * (BI + BJ) ** 2 temp -= 0.5 * (BI - BJ) ** 2 ri1[sub] += np.pi * G[sub] ** 2 * temp diff --git a/py4DSTEM/process/diffraction/crystal_viz.py b/py4DSTEM/process/diffraction/crystal_viz.py index da016b3ed..47df2e6ca 100644 --- a/py4DSTEM/process/diffraction/crystal_viz.py +++ b/py4DSTEM/process/diffraction/crystal_viz.py @@ -454,8 +454,7 @@ def plot_scattering_intensity( int_sf_plot = calc_1D_profile( k, self.g_vec_leng, - (self.struct_factors_int**int_power_scale) - * (self.g_vec_leng**k_power_scale), + (self.struct_factors_int**int_power_scale) * (self.g_vec_leng**k_power_scale), remove_origin=True, k_broadening=k_broadening, int_scale=int_scale, diff --git a/py4DSTEM/process/fit/fit.py b/py4DSTEM/process/fit/fit.py index 9973ff79f..5c2d56a3c 100644 --- a/py4DSTEM/process/fit/fit.py +++ b/py4DSTEM/process/fit/fit.py @@ -169,8 +169,7 @@ def polar_gaussian_2D( # t2 = np.min(np.vstack([t,1-t])) t2 = np.square(t - mu_t) return ( - I0 * np.exp(-(t2 / (2 * sigma_t**2) + (q - mu_q) ** 2 / (2 * sigma_q**2))) - + C + I0 * np.exp(-(t2 / (2 * sigma_t**2) + (q - mu_q) ** 2 / (2 * sigma_q**2))) + C ) diff --git a/py4DSTEM/process/phase/magnetic_ptychographic_tomography.py b/py4DSTEM/process/phase/magnetic_ptychographic_tomography.py index 7249a9064..60cec9742 100644 --- a/py4DSTEM/process/phase/magnetic_ptychographic_tomography.py +++ b/py4DSTEM/process/phase/magnetic_ptychographic_tomography.py @@ -1201,20 +1201,20 @@ def reconstruct( # position correction if not fix_positions and a0 
> 0: - self._positions_px_all[ - batch_indices - ] = self._position_correction( - object_sliced, - vectorized_patch_indices_row, - vectorized_patch_indices_col, - shifted_probes, - overlap, - amplitudes_device, - positions_px, - positions_px_initial, - positions_step_size, - max_position_update_distance, - max_position_total_distance, + self._positions_px_all[batch_indices] = ( + self._position_correction( + object_sliced, + vectorized_patch_indices_row, + vectorized_patch_indices_col, + shifted_probes, + overlap, + amplitudes_device, + positions_px, + positions_px_initial, + positions_step_size, + max_position_update_distance, + max_position_total_distance, + ) ) measurement_error += batch_error @@ -1320,10 +1320,12 @@ def reconstruct( butterworth_order=butterworth_order, object_positivity=object_positivity, shrinkage_rad=shrinkage_rad, - object_mask=self._object_fov_mask_inverse - if fix_potential_baseline - and self._object_fov_mask_inverse.sum() > 0 - else None, + object_mask=( + self._object_fov_mask_inverse + if fix_potential_baseline + and self._object_fov_mask_inverse.sum() > 0 + else None + ), tv_denoise=tv_denoise and tv_denoise_weights is not None, tv_denoise_weights=tv_denoise_weights, tv_denoise_inner_iter=tv_denoise_inner_iter, @@ -1353,10 +1355,12 @@ def reconstruct( butterworth_order=butterworth_order, object_positivity=object_positivity, shrinkage_rad=shrinkage_rad, - object_mask=self._object_fov_mask_inverse - if fix_potential_baseline - and self._object_fov_mask_inverse.sum() > 0 - else None, + object_mask=( + self._object_fov_mask_inverse + if fix_potential_baseline + and self._object_fov_mask_inverse.sum() > 0 + else None + ), tv_denoise=tv_denoise and tv_denoise_weights is not None, tv_denoise_weights=tv_denoise_weights, tv_denoise_inner_iter=tv_denoise_inner_iter, diff --git a/py4DSTEM/process/phase/magnetic_ptychography.py b/py4DSTEM/process/phase/magnetic_ptychography.py index d718b1a9e..9be8144f4 100644 --- 
a/py4DSTEM/process/phase/magnetic_ptychography.py +++ b/py4DSTEM/process/phase/magnetic_ptychography.py @@ -1447,20 +1447,20 @@ def reconstruct( # position correction if not fix_positions and a0 > 0: - self._positions_px_all[ - batch_indices - ] = self._position_correction( - self._object, - vectorized_patch_indices_row, - vectorized_patch_indices_col, - shifted_probes, - overlap, - amplitudes_device, - positions_px, - positions_px_initial, - positions_step_size, - max_position_update_distance, - max_position_total_distance, + self._positions_px_all[batch_indices] = ( + self._position_correction( + self._object, + vectorized_patch_indices_row, + vectorized_patch_indices_col, + shifted_probes, + overlap, + amplitudes_device, + positions_px, + positions_px_initial, + positions_step_size, + max_position_update_distance, + max_position_total_distance, + ) ) measurement_error += batch_error @@ -1555,10 +1555,12 @@ def reconstruct( tv_denoise_inner_iter=tv_denoise_inner_iter, object_positivity=object_positivity, shrinkage_rad=shrinkage_rad, - object_mask=self._object_fov_mask_inverse - if fix_potential_baseline - and self._object_fov_mask_inverse.sum() > 0 - else None, + object_mask=( + self._object_fov_mask_inverse + if fix_potential_baseline + and self._object_fov_mask_inverse.sum() > 0 + else None + ), pure_phase_object=pure_phase_object and self._object_type == "complex", ) @@ -1588,10 +1590,12 @@ def reconstruct( tv_denoise_inner_iter=tv_denoise_inner_iter, object_positivity=object_positivity, shrinkage_rad=shrinkage_rad, - object_mask=self._object_fov_mask_inverse - if fix_potential_baseline - and self._object_fov_mask_inverse.sum() > 0 - else None, + object_mask=( + self._object_fov_mask_inverse + if fix_potential_baseline + and self._object_fov_mask_inverse.sum() > 0 + else None + ), pure_phase_object=pure_phase_object and self._object_type == "complex", ) diff --git a/py4DSTEM/process/phase/mixedstate_multislice_ptychography.py 
b/py4DSTEM/process/phase/mixedstate_multislice_ptychography.py index 47dd67dd3..0a856fe85 100644 --- a/py4DSTEM/process/phase/mixedstate_multislice_ptychography.py +++ b/py4DSTEM/process/phase/mixedstate_multislice_ptychography.py @@ -1047,16 +1047,21 @@ def reconstruct( butterworth_order=butterworth_order, kz_regularization_filter=kz_regularization_filter and kz_regularization_gamma is not None, - kz_regularization_gamma=kz_regularization_gamma[a0] - if kz_regularization_gamma is not None - and isinstance(kz_regularization_gamma, np.ndarray) - else kz_regularization_gamma, + kz_regularization_gamma=( + kz_regularization_gamma[a0] + if kz_regularization_gamma is not None + and isinstance(kz_regularization_gamma, np.ndarray) + else kz_regularization_gamma + ), identical_slices=identical_slices, object_positivity=object_positivity, shrinkage_rad=shrinkage_rad, - object_mask=self._object_fov_mask_inverse - if fix_potential_baseline and self._object_fov_mask_inverse.sum() > 0 - else None, + object_mask=( + self._object_fov_mask_inverse + if fix_potential_baseline + and self._object_fov_mask_inverse.sum() > 0 + else None + ), pure_phase_object=pure_phase_object and self._object_type == "complex", tv_denoise_chambolle=tv_denoise_chambolle and tv_denoise_weight_chambolle is not None, diff --git a/py4DSTEM/process/phase/mixedstate_ptychography.py b/py4DSTEM/process/phase/mixedstate_ptychography.py index 6fbb72b5d..72912a13c 100644 --- a/py4DSTEM/process/phase/mixedstate_ptychography.py +++ b/py4DSTEM/process/phase/mixedstate_ptychography.py @@ -941,9 +941,12 @@ def reconstruct( tv_denoise_inner_iter=tv_denoise_inner_iter, object_positivity=object_positivity, shrinkage_rad=shrinkage_rad, - object_mask=self._object_fov_mask_inverse - if fix_potential_baseline and self._object_fov_mask_inverse.sum() > 0 - else None, + object_mask=( + self._object_fov_mask_inverse + if fix_potential_baseline + and self._object_fov_mask_inverse.sum() > 0 + else None + ), 
pure_phase_object=pure_phase_object and self._object_type == "complex", ) diff --git a/py4DSTEM/process/phase/multislice_ptychography.py b/py4DSTEM/process/phase/multislice_ptychography.py index 03e636f57..2b35d0673 100644 --- a/py4DSTEM/process/phase/multislice_ptychography.py +++ b/py4DSTEM/process/phase/multislice_ptychography.py @@ -1030,9 +1030,12 @@ def reconstruct( identical_slices=identical_slices, object_positivity=object_positivity, shrinkage_rad=shrinkage_rad, - object_mask=self._object_fov_mask_inverse - if fix_potential_baseline and self._object_fov_mask_inverse.sum() > 0 - else None, + object_mask=( + self._object_fov_mask_inverse + if fix_potential_baseline + and self._object_fov_mask_inverse.sum() > 0 + else None + ), pure_phase_object=pure_phase_object and self._object_type == "complex", tv_denoise_chambolle=tv_denoise_chambolle and tv_denoise_weight_chambolle is not None, diff --git a/py4DSTEM/process/phase/parallax.py b/py4DSTEM/process/phase/parallax.py index 58181d812..2ba71c8f1 100644 --- a/py4DSTEM/process/phase/parallax.py +++ b/py4DSTEM/process/phase/parallax.py @@ -2194,16 +2194,16 @@ def score_CTF(coefs): measured_shifts_sx = xp.zeros( self._region_of_interest_shape, dtype=xp.float32 ) - measured_shifts_sx[ - self._xy_inds[:, 0], self._xy_inds[:, 1] - ] = self._xy_shifts_Ang[:, 0] + measured_shifts_sx[self._xy_inds[:, 0], self._xy_inds[:, 1]] = ( + self._xy_shifts_Ang[:, 0] + ) measured_shifts_sy = xp.zeros( self._region_of_interest_shape, dtype=xp.float32 ) - measured_shifts_sy[ - self._xy_inds[:, 0], self._xy_inds[:, 1] - ] = self._xy_shifts_Ang[:, 1] + measured_shifts_sy[self._xy_inds[:, 0], self._xy_inds[:, 1]] = ( + self._xy_shifts_Ang[:, 1] + ) fitted_shifts = ( xp.tensordot(gradients, xp.array(self._aberrations_coefs), axes=1) @@ -2214,16 +2214,16 @@ def score_CTF(coefs): fitted_shifts_sx = xp.zeros( self._region_of_interest_shape, dtype=xp.float32 ) - fitted_shifts_sx[ - self._xy_inds[:, 0], self._xy_inds[:, 1] - ] = 
fitted_shifts[:, 0] + fitted_shifts_sx[self._xy_inds[:, 0], self._xy_inds[:, 1]] = ( + fitted_shifts[:, 0] + ) fitted_shifts_sy = xp.zeros( self._region_of_interest_shape, dtype=xp.float32 ) - fitted_shifts_sy[ - self._xy_inds[:, 0], self._xy_inds[:, 1] - ] = fitted_shifts[:, 1] + fitted_shifts_sy[self._xy_inds[:, 0], self._xy_inds[:, 1]] = ( + fitted_shifts[:, 1] + ) max_shift = xp.max( xp.array( @@ -2520,8 +2520,7 @@ def aberration_correct( SNR_inv = ( xp.sqrt( 1 - + (kra2**k_info_power) - / ((k_info_limit) ** (2 * k_info_power)) + + (kra2**k_info_power) / ((k_info_limit) ** (2 * k_info_power)) ) / Wiener_signal_noise_ratio ) diff --git a/py4DSTEM/process/phase/phase_base_class.py b/py4DSTEM/process/phase/phase_base_class.py index a67640d9b..a57568f48 100644 --- a/py4DSTEM/process/phase/phase_base_class.py +++ b/py4DSTEM/process/phase/phase_base_class.py @@ -997,17 +997,21 @@ def _solve_for_center_of_mass_relative_rotation( if _rotation_best_transpose: ax.plot( rotation_angles_deg, - asnumpy(rotation_div_transpose) - if maximize_divergence - else asnumpy(rotation_curl_transpose), + ( + asnumpy(rotation_div_transpose) + if maximize_divergence + else asnumpy(rotation_curl_transpose) + ), label="CoM after transpose", ) else: ax.plot( rotation_angles_deg, - asnumpy(rotation_div) - if maximize_divergence - else asnumpy(rotation_curl), + ( + asnumpy(rotation_div) + if maximize_divergence + else asnumpy(rotation_curl) + ), label="CoM", ) @@ -1159,16 +1163,20 @@ def _solve_for_center_of_mass_relative_rotation( ax.plot( rotation_angles_deg, - asnumpy(rotation_div) - if maximize_divergence - else asnumpy(rotation_curl), + ( + asnumpy(rotation_div) + if maximize_divergence + else asnumpy(rotation_curl) + ), label="CoM", ) ax.plot( rotation_angles_deg, - asnumpy(rotation_div_transpose) - if maximize_divergence - else asnumpy(rotation_curl_transpose), + ( + asnumpy(rotation_div_transpose) + if maximize_divergence + else asnumpy(rotation_curl_transpose) + ), label="CoM after 
transpose", ) y_r = ax.get_ylim() diff --git a/py4DSTEM/process/phase/ptychographic_methods.py b/py4DSTEM/process/phase/ptychographic_methods.py index 6ab349f30..a1cef5c56 100644 --- a/py4DSTEM/process/phase/ptychographic_methods.py +++ b/py4DSTEM/process/phase/ptychographic_methods.py @@ -345,9 +345,7 @@ def _precompute_propagator_arrays( propagators[i] = xp.exp( 1.0j * (-(kx**2)[:, None] * np.pi * wavelength * dz) ) - propagators[i] *= xp.exp( - 1.0j * (-(ky**2)[None] * np.pi * wavelength * dz) - ) + propagators[i] *= xp.exp(1.0j * (-(ky**2)[None] * np.pi * wavelength * dz)) if theta_x is not None: propagators[i] *= xp.exp( diff --git a/py4DSTEM/process/phase/ptychographic_tomography.py b/py4DSTEM/process/phase/ptychographic_tomography.py index b4a29fa5d..f5d825f55 100644 --- a/py4DSTEM/process/phase/ptychographic_tomography.py +++ b/py4DSTEM/process/phase/ptychographic_tomography.py @@ -1093,20 +1093,20 @@ def reconstruct( # position correction if not fix_positions: - self._positions_px_all[ - batch_indices - ] = self._position_correction( - object_sliced, - vectorized_patch_indices_row, - vectorized_patch_indices_col, - shifted_probes, - overlap, - amplitudes_device, - positions_px, - positions_px_initial, - positions_step_size, - max_position_update_distance, - max_position_total_distance, + self._positions_px_all[batch_indices] = ( + self._position_correction( + object_sliced, + vectorized_patch_indices_row, + vectorized_patch_indices_col, + shifted_probes, + overlap, + amplitudes_device, + positions_px, + positions_px_initial, + positions_step_size, + max_position_update_distance, + max_position_total_distance, + ) ) measurement_error += batch_error @@ -1206,10 +1206,12 @@ def reconstruct( butterworth_order=butterworth_order, object_positivity=object_positivity, shrinkage_rad=shrinkage_rad, - object_mask=self._object_fov_mask_inverse - if fix_potential_baseline - and self._object_fov_mask_inverse.sum() > 0 - else None, + object_mask=( + 
self._object_fov_mask_inverse + if fix_potential_baseline + and self._object_fov_mask_inverse.sum() > 0 + else None + ), tv_denoise=tv_denoise and tv_denoise_weights is not None, tv_denoise_weights=tv_denoise_weights, tv_denoise_inner_iter=tv_denoise_inner_iter, @@ -1236,10 +1238,12 @@ def reconstruct( butterworth_order=butterworth_order, object_positivity=object_positivity, shrinkage_rad=shrinkage_rad, - object_mask=self._object_fov_mask_inverse - if fix_potential_baseline - and self._object_fov_mask_inverse.sum() > 0 - else None, + object_mask=( + self._object_fov_mask_inverse + if fix_potential_baseline + and self._object_fov_mask_inverse.sum() > 0 + else None + ), tv_denoise=tv_denoise and tv_denoise_weights is not None, tv_denoise_weights=tv_denoise_weights, tv_denoise_inner_iter=tv_denoise_inner_iter, diff --git a/py4DSTEM/process/phase/ptychographic_visualizations.py b/py4DSTEM/process/phase/ptychographic_visualizations.py index 58dd224cf..dd1732ae0 100644 --- a/py4DSTEM/process/phase/ptychographic_visualizations.py +++ b/py4DSTEM/process/phase/ptychographic_visualizations.py @@ -384,9 +384,11 @@ def _visualize_all_iterations( grid = ImageGrid( fig, spec[0], - nrows_ncols=(1, iterations_grid[1]) - if (plot_probe or plot_fourier_probe) - else iterations_grid, + nrows_ncols=( + (1, iterations_grid[1]) + if (plot_probe or plot_fourier_probe) + else iterations_grid + ), axes_pad=(0.75, 0.5) if cbar else 0.5, cbar_mode="each" if cbar else None, cbar_pad="2.5%" if cbar else None, diff --git a/py4DSTEM/process/phase/singleslice_ptychography.py b/py4DSTEM/process/phase/singleslice_ptychography.py index 71fe65cd7..1e4e75340 100644 --- a/py4DSTEM/process/phase/singleslice_ptychography.py +++ b/py4DSTEM/process/phase/singleslice_ptychography.py @@ -916,9 +916,12 @@ def reconstruct( tv_denoise_inner_iter=tv_denoise_inner_iter, object_positivity=object_positivity, shrinkage_rad=shrinkage_rad, - object_mask=self._object_fov_mask_inverse - if fix_potential_baseline and 
self._object_fov_mask_inverse.sum() > 0 - else None, + object_mask=( + self._object_fov_mask_inverse + if fix_potential_baseline + and self._object_fov_mask_inverse.sum() > 0 + else None + ), pure_phase_object=pure_phase_object and self._object_type == "complex", ) diff --git a/py4DSTEM/process/phase/utils.py b/py4DSTEM/process/phase/utils.py index a25b7acd3..98d8fdd49 100644 --- a/py4DSTEM/process/phase/utils.py +++ b/py4DSTEM/process/phase/utils.py @@ -203,9 +203,7 @@ def evaluate_gaussian_envelope( self, alpha: Union[float, np.ndarray] ) -> Union[float, np.ndarray]: xp = self._xp - return xp.exp( - -0.5 * self._gaussian_spread**2 * alpha**2 / self._wavelength**2 - ) + return xp.exp(-0.5 * self._gaussian_spread**2 * alpha**2 / self._wavelength**2) def evaluate_spatial_envelope( self, alpha: Union[float, np.ndarray], phi: Union[float, np.ndarray] diff --git a/py4DSTEM/process/polar/polar_analysis.py b/py4DSTEM/process/polar/polar_analysis.py index 7df00a235..c62d1258d 100644 --- a/py4DSTEM/process/polar/polar_analysis.py +++ b/py4DSTEM/process/polar/polar_analysis.py @@ -584,8 +584,12 @@ def plot_pdf( def calculate_FEM_local( self, - figsize=(8, 6), + use_median=False, + plot_normalized_variance=True, + figsize=(8, 4), + return_values=False, returnfig=False, + progress_bar=True, ): """ Calculate fluctuation electron microscopy (FEM) statistics, including radial mean, @@ -596,18 +600,293 @@ def calculate_FEM_local( -------- self: PolarDatacube Polar datacube used for measuring FEM properties. + use_median: Bool + Use median instead of mean for statistics. 
Returns -------- - radial_avg: np.array - Average radial intensity - radial_var: np.array - Variance in the radial dimension + local_radial_mean: np.array + Average radial intensity of each probe position + local_radial_var: np.array + Variance in the radial dimension of each probe position + """ + # init radial data arrays + self.local_radial_mean = np.zeros( + ( + self._datacube.shape[0], + self._datacube.shape[1], + self.polar_shape[1], + ) + ) + self.local_radial_var = np.zeros( + ( + self._datacube.shape[0], + self._datacube.shape[1], + self.polar_shape[1], + ) + ) + + # Compute the radial mean and standard deviation for each probe position + for rx, ry in tqdmnd( + self._datacube.shape[0], + self._datacube.shape[1], + desc="Radial statistics", + unit=" probe positions", + disable=not progress_bar, + ): + im = self.data[rx, ry] + + if use_median: + im_mean = np.ma.median(im, axis=0) + im_var = np.ma.median((im - im_mean) ** 2, axis=0) + else: + im_mean = np.ma.mean(im, axis=0) + im_var = np.ma.mean((im - im_mean) ** 2, axis=0) + + self.local_radial_mean[rx, ry] = im_mean + self.local_radial_var[rx, ry] = im_var + + if plot_normalized_variance: + fig, ax = plt.subplots(figsize=figsize) + + sig = self.local_radial_var / self.local_radial_mean**2 + if use_median: + sig_plot = np.median(sig, axis=(0, 1)) + else: + sig_plot = np.mean(sig, axis=(0, 1)) + + ax.plot( + self.qq, + sig_plot, + ) + ax.set_xlabel( + "Scattering Vector (" + self.calibration.get_Q_pixel_units() + ")" + ) + ax.set_ylabel("Normalized Variance") + ax.set_xlim((self.qq[0], self.qq[-1])) + + if return_values: + if returnfig: + return self.local_radial_mean, self.local_radial_var, fig, ax + else: + return self.local_radial_mean, self.local_radial_var + else: + if returnfig: + return fig, ax + + +def calculate_annular_symmetry( + self, + max_symmetry=12, + mask_realspace=None, + plot_result=False, + figsize=(8, 4), + return_symmetry_map=False, + progress_bar=True, +): """ + This function 
calculates radial symmetry of diffraction patterns, typically applied + to amorphous scattering, but it can also be used for crystalline Bragg diffraction. - pass + Parameters + -------- + self: PolarDatacube + Polar transformed datacube + max_symmetry: int + Symmetry orders will be computed from 1 to max_symmetry for n-fold symmetry orders. + mask_realspace: np.array + Boolean mask, symmetries will only be computed at probe positions where mask is True. + plot_result: bool + Plot the resulting array + figsize: (float, float) + Size of the plot. + return_symmetry_map: bool + Set to true to return the symmetry array. + progress_bar: bool + Show progress bar during calculation. + + Returns + -------- + annular_symmetry: np.array + Array with annular symmetry magnitudes, with shape [max_symmetry, num_radial_bins] + + """ + + # Initialize outputs + self.annular_symmetry_max = max_symmetry + self.annular_symmetry = np.zeros( + ( + self.data_raw.shape[0], + self.data_raw.shape[1], + max_symmetry, + self.polar_shape[1], + ) + ) + + # Loop over all probe positions + for rx, ry in tqdmnd( + self._datacube.shape[0], + self._datacube.shape[1], + desc="Annular symmetry", + unit=" probe positions", + disable=not progress_bar, + ): + # Get polar transformed image + im = self.transform( + self.data_raw.data[rx, ry], + ) + polar_im = np.ma.getdata(im) + polar_mask = np.ma.getmask(im) + polar_im[polar_mask] = 0 + polar_mask = np.logical_not(polar_mask) + + # Calculate normalized correlation of polar image along angular direction (axis = 0) + polar_corr = np.real( + np.fft.ifft( + np.abs( + np.fft.fft( + polar_im, + axis=0, + ) + ) + ** 2, + axis=0, + ), + ) + polar_corr_norm = ( + np.sum( + polar_im, + axis=0, + ) + ** 2 + ) + sub = polar_corr_norm > 0 + polar_corr[:, sub] /= polar_corr_norm[ + sub + ] # gets rid of divide by 0 (False near center) + polar_corr[:, sub] -= 1 + + # Calculate normalized correlation of polar mask along angular direction (axis = 0) + mask_corr = np.real( 
+ np.fft.ifft( + np.abs( + np.fft.fft( + polar_mask.astype("float"), + axis=0, + ) + ) + ** 2, + axis=0, + ), + ) + mask_corr_norm = ( + np.sum( + polar_mask.astype("float"), + axis=0, + ) + ** 2 + ) + sub = mask_corr_norm > 0 + mask_corr[:, sub] /= mask_corr_norm[ + sub + ] # gets rid of divide by 0 (False near center) + mask_corr[:, sub] -= 1 + + # Normalize polar correlation by mask correlation (beam stop removal) + sub = np.abs(mask_corr) > 0 + polar_corr[sub] -= mask_corr[sub] + + # Measure symmetry + self.annular_symmetry[rx, ry, :, :] = np.abs(np.fft.fft(polar_corr, axis=0))[ + 1 : max_symmetry + 1 + ] + + if plot_result: + fig, ax = plt.subplots(figsize=figsize) + ax.imshow( + np.mean(self.annular_symmetry, axis=(0, 1)), + aspect="auto", + extent=[ + self.qq[0], + self.qq[-1], + max_symmetry, + 0, + ], + ) + ax.set_yticks( + np.arange(max_symmetry) + 0.5, + range(1, max_symmetry + 1), + ) + ax.set_xlabel("Scattering angle (1/Å)") + ax.set_ylabel("Symmetry Order") + + if return_symmetry_map: + return self.annular_symmetry + + +def plot_annular_symmetry( + self, + symmetry_orders=None, + plot_std=False, + normalize_by_mean=False, + cmap="turbo", + vmin=0.01, + vmax=0.99, + figsize=(8, 4), +): + """ + Plot the symmetry orders + """ + + if symmetry_orders is None: + symmetry_orders = np.arange(1, self.annular_symmetry_max + 1) + else: + symmetry_orders = np.array(symmetry_orders) + + # plotting image + if plot_std: + im_plot = np.std( + self.annular_symmetry, + axis=(0, 1), + )[symmetry_orders - 1, :] + else: + im_plot = np.mean( + self.annular_symmetry, + axis=(0, 1), + )[symmetry_orders - 1, :] + if normalize_by_mean: + im_plot /= self.radial_mean[None, :] + + # plotting range + int_vals = np.sort(im_plot.ravel()) + ind0 = np.clip(np.round(im_plot.size * vmin).astype("int"), 0, im_plot.size - 1) + ind1 = np.clip(np.round(im_plot.size * vmax).astype("int"), 0, im_plot.size - 1) + vmin = int_vals[ind0] + vmax = int_vals[ind1] + + # plot image + fig, ax = 
plt.subplots(figsize=figsize) + ax.imshow( + im_plot, + aspect="auto", + extent=[ + self.qq[0], + self.qq[-1], + np.max(symmetry_orders), + np.min(symmetry_orders) - 1, + ], + cmap=cmap, + vmin=vmin, + vmax=vmax, + ) + ax.set_yticks( + symmetry_orders - 0.5, + symmetry_orders, + ) + ax.set_xlabel("Scattering angle (1/A)") + ax.set_ylabel("Symmetry Order") def scattering_model(k2, *coefs): diff --git a/py4DSTEM/process/polar/polar_datacube.py b/py4DSTEM/process/polar/polar_datacube.py index 4c06d4c09..0ba284ada 100644 --- a/py4DSTEM/process/polar/polar_datacube.py +++ b/py4DSTEM/process/polar/polar_datacube.py @@ -4,7 +4,6 @@ class PolarDatacube: - """ An interface to a 4D-STEM datacube under polar-elliptical transformation. """ @@ -97,8 +96,10 @@ def __init__( calculate_radial_statistics, calculate_pair_dist_function, calculate_FEM_local, + calculate_annular_symmetry, plot_radial_mean, plot_radial_var_norm, + plot_annular_symmetry, plot_background_fits, plot_sf_estimate, plot_reduced_pdf, diff --git a/py4DSTEM/process/utils/utils.py b/py4DSTEM/process/utils/utils.py index e2bf4307c..aa49382b5 100644 --- a/py4DSTEM/process/utils/utils.py +++ b/py4DSTEM/process/utils/utils.py @@ -103,12 +103,7 @@ def electron_wavelength_angstrom(E_eV): c = 299792458 h = 6.62607 * 10**-34 - lam = ( - h - / ma.sqrt(2 * m * e * E_eV) - / ma.sqrt(1 + e * E_eV / 2 / m / c**2) - * 10**10 - ) + lam = h / ma.sqrt(2 * m * e * E_eV) / ma.sqrt(1 + e * E_eV / 2 / m / c**2) * 10**10 return lam @@ -117,15 +112,8 @@ def electron_interaction_parameter(E_eV): e = 1.602177 * 10**-19 c = 299792458 h = 6.62607 * 10**-34 - lam = ( - h - / ma.sqrt(2 * m * e * E_eV) - / ma.sqrt(1 + e * E_eV / 2 / m / c**2) - * 10**10 - ) - sigma = ( - (2 * np.pi / lam / E_eV) * (m * c**2 + e * E_eV) / (2 * m * c**2 + e * E_eV) - ) + lam = h / ma.sqrt(2 * m * e * E_eV) / ma.sqrt(1 + e * E_eV / 2 / m / c**2) * 10**10 + sigma = (2 * np.pi / lam / E_eV) * (m * c**2 + e * E_eV) / (2 * m * c**2 + e * E_eV) return sigma diff 
--git a/py4DSTEM/process/wholepatternfit/wp_models.py b/py4DSTEM/process/wholepatternfit/wp_models.py index c0907a11e..e437916eb 100644 --- a/py4DSTEM/process/wholepatternfit/wp_models.py +++ b/py4DSTEM/process/wholepatternfit/wp_models.py @@ -289,12 +289,16 @@ def __init__( "radius": Parameter(radius), "sigma": Parameter(sigma), "intensity": Parameter(intensity), - "x center": WPF.coordinate_model.params["x center"] - if global_center - else Parameter(x0), - "y center": WPF.coordinate_model.params["y center"] - if global_center - else Parameter(y0), + "x center": ( + WPF.coordinate_model.params["x center"] + if global_center + else Parameter(x0) + ), + "y center": ( + WPF.coordinate_model.params["y center"] + if global_center + else Parameter(y0) + ), } super().__init__(name, params, model_type=WPFModelType.AMORPHOUS) diff --git a/py4DSTEM/visualize/vis_grid.py b/py4DSTEM/visualize/vis_grid.py index 9be754689..cb2581e01 100644 --- a/py4DSTEM/visualize/vis_grid.py +++ b/py4DSTEM/visualize/vis_grid.py @@ -281,7 +281,11 @@ def show_image_grid( ) else: _, _ = show( - ar, figax=(fig, ax), returnfig=True, title=print_title, **kwargs + ar, + figax=(fig, ax), + returnfig=True, + title=print_title, + **kwargs, ) except IndexError: ax.axis("off") diff --git a/py4DSTEM/visualize/vis_special.py b/py4DSTEM/visualize/vis_special.py index c8b8a8b12..2db48e371 100644 --- a/py4DSTEM/visualize/vis_special.py +++ b/py4DSTEM/visualize/vis_special.py @@ -504,20 +504,34 @@ def show_origin_meas(data): show_image_grid(get_ar=lambda i: [qx, qy][i], H=1, W=2, cmap="RdBu") -def show_origin_fit(data): +def show_origin_fit( + data, + plot_range=None, + axsize=(3, 3), +): """ Show the measured, fit, and residuals of the origin positions. 
- Args: - data (DataCube or Calibration or (3,2)-tuple of arrays - ((qx0_meas,qy0_meas),(qx0_fit,qy0_fit),(qx0_residuals,qy0_residuals)) + Parameters + ---------- + data: (DataCube or Calibration or (3,2)-tuple of arrays + ((qx0_meas,qy0_meas),(qx0_fit,qy0_fit),(qx0_residuals,qy0_residuals)) + plot_range: (tuple, list, or np.array) + Plotting range in units of pixels + axsize: (tuple) + Size of each plot axis + + Returns + ------- + + """ from py4DSTEM.data import Calibration from py4DSTEM.datacube import DataCube if isinstance(data, tuple): assert len(data) == 3 - qx0_meas, qy_meas = data[0] + qx0_meas, qy0_meas = data[0] qx0_fit, qy0_fit = data[1] qx0_residuals, qy0_residuals = data[2] elif isinstance(data, DataCube): @@ -531,18 +545,39 @@ def show_origin_fit(data): else: raise Exception("data must be of type Datacube or Calibration or tuple") + # Centered intensity plots + qx0_meas_plot = qx0_meas - np.median(qx0_meas) + qy0_meas_plot = qy0_meas - np.median(qy0_meas) + qx0_fit_plot = qx0_fit - np.median(qx0_fit) + qy0_fit_plot = qy0_fit - np.median(qy0_fit) + + # Determine plotting range + if plot_range is None: + plot_range = np.array((-1.0, 1.0)) * np.max( + (np.abs(qx0_fit_plot), np.abs(qy0_fit_plot)) + ) + else: + plot_range = np.array(plot_range) + if plot_range.ndim == 0: + plot_range = np.array((-1.0, 1.0)) * plot_range + + # plotting show_image_grid( get_ar=lambda i: [ - qx0_meas, - qx0_fit, + qx0_meas_plot, + qx0_fit_plot, qx0_residuals, - qy0_meas, - qy0_fit, + qy0_meas_plot, + qy0_fit_plot, qy0_residuals, ][i], H=2, W=3, cmap="RdBu", + intensity_range="absolute", + vmin=plot_range[0], + vmax=plot_range[1], + axsize=axsize, ) From dd4c6d814ad572bdd2ab2a3fc4323733f434e597 Mon Sep 17 00:00:00 2001 From: Colin Date: Mon, 26 Feb 2024 13:01:25 -0800 Subject: [PATCH 25/50] Default threshold should be zero --- py4DSTEM/datacube/datacube.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/py4DSTEM/datacube/datacube.py 
b/py4DSTEM/datacube/datacube.py index 77c125721..930dd4c13 100644 --- a/py4DSTEM/datacube/datacube.py +++ b/py4DSTEM/datacube/datacube.py @@ -510,7 +510,7 @@ def get_vacuum_probe( ROI=None, align=True, mask=None, - threshold=0.2, + threshold=0.0, expansion=12, opening=3, verbose=False, From 6b3e8af5f91d3ae0ab4ea72bc83e6edbb088eb99 Mon Sep 17 00:00:00 2001 From: Steve Zeltmann <37132012+sezelt@users.noreply.github.com> Date: Wed, 28 Feb 2024 12:21:06 -0500 Subject: [PATCH 26/50] Release memory after Bayesian optimization (#599) * do not retain optimization function after finished running, to release memory * update comment * set clear_fft_cache off in optimizer defaults * Update parameter_optimize.py * format with black 24.1.1 * Update parameter_optimize.py * format with black --- py4DSTEM/process/phase/parameter_optimize.py | 51 +++++++++++++------- 1 file changed, 34 insertions(+), 17 deletions(-) diff --git a/py4DSTEM/process/phase/parameter_optimize.py b/py4DSTEM/process/phase/parameter_optimize.py index aa369872d..652e1046e 100644 --- a/py4DSTEM/process/phase/parameter_optimize.py +++ b/py4DSTEM/process/phase/parameter_optimize.py @@ -186,7 +186,7 @@ def evaluation_callback(ptycho): pbar.update(1) error_metric(ptycho) - self._grid_search_function = self._get_optimization_function( + grid_search_function = self._get_optimization_function( self._reconstruction_type, self._parameter_list, self._init_static_args, @@ -200,7 +200,7 @@ def evaluation_callback(ptycho): evaluation_callback, ) - grid_search_res = list(map(self._grid_search_function, params_grid)) + grid_search_res = list(map(grid_search_function, params_grid)) pbar.close() if plot_reconstructed_objects: @@ -290,7 +290,7 @@ def optimize( error_metric = self._get_error_metric(error_metric) - self._optimization_function = self._get_optimization_function( + optimization_function = self._get_optimization_function( self._reconstruction_type, self._parameter_list, self._init_static_args, @@ -312,22 +312,37 @@ 
def optimize( def callback(*args, **kwargs): pbar.update(1) - self._skopt_result = gp_minimize( - self._optimization_function, - self._parameter_list, - n_calls=n_calls, - n_initial_points=n_initial_points, - x0=self._x0, - callback=callback, - **skopt_kwargs, - ) + try: + self._skopt_result = gp_minimize( + optimization_function, + self._parameter_list, + n_calls=n_calls, + n_initial_points=n_initial_points, + x0=self._x0, + callback=callback, + **skopt_kwargs, + ) - print("Optimized parameters:") - for p, x in zip(self._parameter_list, self._skopt_result.x): - print(f"{p.name}: {x}") + # Remove the optimization result's reference to the function, as it potentially contains a + # copy of the ptycho object + del self._skopt_result["fun"] - # Finish the tqdm progressbar so subsequent things behave nicely - pbar.close() + # If using the GPU, free some cached stuff + if self._init_args.get("device", "cpu") == "gpu": + import cupy as cp + + cp.get_default_memory_pool().free_all_blocks() + cp.get_default_pinned_memory_pool().free_all_blocks() + from cupy.fft.config import get_plan_cache + + get_plan_cache().clear() + + print("Optimized parameters:") + for p, x in zip(self._parameter_list, self._skopt_result.x): + print(f"{p.name}: {x}") + finally: + # close the pbar gracefully on interrupt + pbar.close() return self @@ -644,6 +659,7 @@ def f(**kwargs): def _set_optimizer_defaults( self, verbose=False, + clear_fft_cache=False, plot_center_of_mass=False, plot_rotation=False, plot_probe_overlaps=False, @@ -655,6 +671,7 @@ def _set_optimizer_defaults( Set all of the verbose and plotting to False, allowing for user-overwrite. 
""" self._init_static_args["verbose"] = verbose + self._init_static_args["clear_fft_cache"] = clear_fft_cache self._preprocess_static_args["plot_center_of_mass"] = plot_center_of_mass self._preprocess_static_args["plot_rotation"] = plot_rotation From d7e7472da2dd0e23d440805288cd0bd739c10f82 Mon Sep 17 00:00:00 2001 From: Steven Zeltmann Date: Wed, 28 Feb 2024 13:07:59 -0500 Subject: [PATCH 27/50] remove duplicate make_fourier_coords_2D --- py4DSTEM/process/utils/utils.py | 29 ----------------------------- 1 file changed, 29 deletions(-) diff --git a/py4DSTEM/process/utils/utils.py b/py4DSTEM/process/utils/utils.py index aa49382b5..a7fe191af 100644 --- a/py4DSTEM/process/utils/utils.py +++ b/py4DSTEM/process/utils/utils.py @@ -11,16 +11,6 @@ import matplotlib.font_manager as fm from emdfile import tqdmnd -from py4DSTEM.process.utils.multicorr import upsampled_correlation -from py4DSTEM.preprocess.utils import make_Fourier_coords2D - -try: - from IPython.display import clear_output -except ImportError: - - def clear_output(wait=True): - pass - try: import cupy as cp @@ -173,25 +163,6 @@ def get_qx_qy_1d(M, dx=[1, 1], fft_shifted=False): return qxa, qya -def make_Fourier_coords2D(Nx, Ny, pixelSize=1): - """ - Generates Fourier coordinates for a (Nx,Ny)-shaped 2D array. - Specifying the pixelSize argument sets a unit size. - """ - if hasattr(pixelSize, "__len__"): - assert len(pixelSize) == 2, "pixelSize must either be a scalar or have length 2" - pixelSize_x = pixelSize[0] - pixelSize_y = pixelSize[1] - else: - pixelSize_x = pixelSize - pixelSize_y = pixelSize - - qx = np.fft.fftfreq(Nx, pixelSize_x) - qy = np.fft.fftfreq(Ny, pixelSize_y) - qy, qx = np.meshgrid(qy, qx) - return qx, qy - - def get_CoM(ar, device="cpu", corner_centered=False): """ Finds and returns the center of mass of array ar. 
From d478aa146cb3b5031b91208f6257a73fafe029ec Mon Sep 17 00:00:00 2001 From: Steven Zeltmann Date: Wed, 28 Feb 2024 13:10:22 -0500 Subject: [PATCH 28/50] format with black --- py4DSTEM/process/polar/polar_fits.py | 24 +++++++++--------------- 1 file changed, 9 insertions(+), 15 deletions(-) diff --git a/py4DSTEM/process/polar/polar_fits.py b/py4DSTEM/process/polar/polar_fits.py index 81e6df721..18f059b61 100644 --- a/py4DSTEM/process/polar/polar_fits.py +++ b/py4DSTEM/process/polar/polar_fits.py @@ -7,14 +7,14 @@ def fit_amorphous_ring( - im = None, - datacube = None, + im=None, + datacube=None, center=None, radial_range=None, coefs=None, mask_dp=None, show_fit_mask=False, - fit_all_images = False, + fit_all_images=False, maxfev=None, verbose=False, plot_result=True, @@ -22,7 +22,7 @@ def fit_amorphous_ring( plot_int_scale=(-3, 3), figsize=(8, 8), return_all_coefs=True, - progress_bar = None, + progress_bar=None, ): """ Fit an amorphous halo with a two-sided Gaussian model, plus a background @@ -76,7 +76,6 @@ def fit_amorphous_ring( im = datacube.get_dp_mean() if progress_bar is None: progress_bar = True - # Default values if center is None: @@ -211,11 +210,8 @@ def fit_amorphous_ring( # Perform the fit on each individual diffration pattern if fit_all_images: - coefs_all = np.zeros(( - datacube.shape[0], - datacube.shape[1], - coefs.size)) - + coefs_all = np.zeros((datacube.shape[0], datacube.shape[1], coefs.size)) + for rx, ry in tqdmnd( datacube.shape[0], datacube.shape[1], @@ -223,7 +219,7 @@ def fit_amorphous_ring( unit=" probe positions", disable=not progress_bar, ): - vals = datacube.data[rx,ry][mask] + vals = datacube.data[rx, ry][mask] int_mean = np.mean(vals) if maxfev is None: @@ -248,9 +244,7 @@ def fit_amorphous_ring( coefs_single[4] = np.mod(coefs_single[4], 2 * np.pi) coefs_single[5:8] *= int_mean - coefs_all[rx,ry] = coefs_single - - + coefs_all[rx, ry] = coefs_single if verbose: print("x0 = " + str(np.round(coefs[0], 3)) + " px") @@ -277,7 +271,7 @@ 
def fit_amorphous_ring( return coefs else: if fit_all_images: - return coefs_all[:,:,:5] + return coefs_all[:, :, :5] else: return coefs[:5] From d3ec3c2ed8c25a29453de73e249d6eb373fe55a5 Mon Sep 17 00:00:00 2001 From: Georgios Varnavides Date: Wed, 28 Feb 2024 10:16:27 -0800 Subject: [PATCH 29/50] Added phase_contrast warning in README (#608) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 6df64dbc7..3fe6cc745 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ > :warning: **py4DSTEM version 0.14 update** :warning: Warning: this is a major update and we expect some workflows to break. You can still install previous versions of py4DSTEM [as discussed here](#legacyinstall) - +> :warning: **Phase retrieval refactor version 0.14.9** :warning: Warning: The phase-retrieval modules in py4DSTEM (DPC, parallax, and ptychography) underwent a major refactor in version 0.14.9 and as such older tutorial notebooks will not work as expected. Notably, class names have been pruned to remove the trailing "Reconstruction" (`DPCReconstruction` -> `DPC` etc.), and regularization functions have dropped the `_iter` suffix (and are instead specified as boolean flags). We are working on updating the tutorial notebooks to reflect these changes. In the meantime, there's some more information in the relevant pull request [here](https://github.com/py4dstem/py4DSTEM/pull/597#issuecomment-1890325568). 
![py4DSTEM logo](/images/py4DSTEM_logo.png) From 84bbc046c231cfb2268ede2ba855021ff1a2f575 Mon Sep 17 00:00:00 2001 From: Georgios Varnavides Date: Wed, 6 Mar 2024 11:51:43 -0800 Subject: [PATCH 30/50] switching to non-vectorized datacube resampling --- py4DSTEM/preprocess/preprocess.py | 27 ++++++++++++++++++++++----- 1 file changed, 22 insertions(+), 5 deletions(-) diff --git a/py4DSTEM/preprocess/preprocess.py b/py4DSTEM/preprocess/preprocess.py index 41ddd7fd4..8e2b5e70a 100644 --- a/py4DSTEM/preprocess/preprocess.py +++ b/py4DSTEM/preprocess/preprocess.py @@ -617,6 +617,10 @@ def resample_data_diffraction( if resampling_factor.shape == (): resampling_factor = np.tile(resampling_factor, 2) + output_size = np.round( + resampling_factor * np.array(datacube.shape[-2:]) + ).astype("int") + else: if output_size is None: raise ValueError( @@ -630,12 +634,25 @@ def resample_data_diffraction( resampling_factor = np.array(output_size) / np.array(datacube.shape[-2:]) - resampling_factor = np.concatenate(((1, 1), resampling_factor)) - datacube.data = zoom( - datacube.data, resampling_factor, order=1, mode="grid-wrap", grid_mode=True - ) + output_data = np.zeros(datacube.Rshape + tuple(output_size)) + for Rx, Ry in tqdmnd( + datacube.shape[0], + datacube.shape[1], + desc="Resampling 4D datacube", + unit="DP", + unit_scale=True, + ): + output_data[Rx, Ry] = zoom( + datacube.data[Rx, Ry].astype(np.float32), + resampling_factor, + order=1, + mode="grid-wrap", + grid_mode=True, + ) + + datacube.data = output_data datacube.calibration.set_Q_pixel_size( - datacube.calibration.get_Q_pixel_size() / resampling_factor[2] + datacube.calibration.get_Q_pixel_size() / resampling_factor[0] ) else: From 963df98c8779891a122044d374b28f02f1156185 Mon Sep 17 00:00:00 2001 From: Georgios Varnavides Date: Sat, 9 Mar 2024 11:11:28 -0800 Subject: [PATCH 31/50] fixing region_of_interest_shape flag --- .../magnetic_ptychographic_tomography.py | 3 +- .../process/phase/magnetic_ptychography.py | 3 +- 
.../mixedstate_multislice_ptychography.py | 3 +- .../process/phase/mixedstate_ptychography.py | 3 +- .../process/phase/multislice_ptychography.py | 3 +- py4DSTEM/process/phase/phase_base_class.py | 2 +- .../process/phase/ptychographic_methods.py | 158 ++++++++++++------ .../process/phase/ptychographic_tomography.py | 3 +- .../process/phase/singleslice_ptychography.py | 3 +- py4DSTEM/process/phase/utils.py | 27 ++- 10 files changed, 146 insertions(+), 62 deletions(-) diff --git a/py4DSTEM/process/phase/magnetic_ptychographic_tomography.py b/py4DSTEM/process/phase/magnetic_ptychographic_tomography.py index 5fe2921a1..5b2239e23 100644 --- a/py4DSTEM/process/phase/magnetic_ptychographic_tomography.py +++ b/py4DSTEM/process/phase/magnetic_ptychographic_tomography.py @@ -221,7 +221,7 @@ def __init__( def preprocess( self, diffraction_intensities_shape: Tuple[int, int] = None, - reshaping_method: str = "fourier", + reshaping_method: str = "bilinear", padded_diffraction_intensities_shape: Tuple[int, int] = None, region_of_interest_shape: Tuple[int, int] = None, dp_mask: np.ndarray = None, @@ -373,6 +373,7 @@ def preprocess( (self._num_diffraction_patterns,) + roi_shape ) + self._amplitudes_shape = np.array(self._amplitudes.shape[-2:]) if region_of_interest_shape is not None: self._resample_exit_waves = True self._region_of_interest_shape = np.array(region_of_interest_shape) diff --git a/py4DSTEM/process/phase/magnetic_ptychography.py b/py4DSTEM/process/phase/magnetic_ptychography.py index dade15199..31b5b6fb5 100644 --- a/py4DSTEM/process/phase/magnetic_ptychography.py +++ b/py4DSTEM/process/phase/magnetic_ptychography.py @@ -199,7 +199,7 @@ def __init__( def preprocess( self, diffraction_intensities_shape: Tuple[int, int] = None, - reshaping_method: str = "fourier", + reshaping_method: str = "bilinear", padded_diffraction_intensities_shape: Tuple[int, int] = None, region_of_interest_shape: Tuple[int, int] = None, dp_mask: np.ndarray = None, @@ -407,6 +407,7 @@ def 
preprocess( (self._num_diffraction_patterns,) + roi_shape ) + self._amplitudes_shape = np.array(self._amplitudes.shape[-2:]) if region_of_interest_shape is not None: self._resample_exit_waves = True self._region_of_interest_shape = np.array(region_of_interest_shape) diff --git a/py4DSTEM/process/phase/mixedstate_multislice_ptychography.py b/py4DSTEM/process/phase/mixedstate_multislice_ptychography.py index 4bbafacc2..357653a83 100644 --- a/py4DSTEM/process/phase/mixedstate_multislice_ptychography.py +++ b/py4DSTEM/process/phase/mixedstate_multislice_ptychography.py @@ -258,7 +258,7 @@ def __init__( def preprocess( self, diffraction_intensities_shape: Tuple[int, int] = None, - reshaping_method: str = "fourier", + reshaping_method: str = "bilinear", padded_diffraction_intensities_shape: Tuple[int, int] = None, region_of_interest_shape: Tuple[int, int] = None, dp_mask: np.ndarray = None, @@ -488,6 +488,7 @@ def preprocess( del _intensities self._num_diffraction_patterns = self._amplitudes.shape[0] + self._amplitudes_shape = np.array(self._amplitudes.shape[-2:]) if region_of_interest_shape is not None: self._resample_exit_waves = True diff --git a/py4DSTEM/process/phase/mixedstate_ptychography.py b/py4DSTEM/process/phase/mixedstate_ptychography.py index befa9d73c..386c6b4d7 100644 --- a/py4DSTEM/process/phase/mixedstate_ptychography.py +++ b/py4DSTEM/process/phase/mixedstate_ptychography.py @@ -204,7 +204,7 @@ def __init__( def preprocess( self, diffraction_intensities_shape: Tuple[int, int] = None, - reshaping_method: str = "fourier", + reshaping_method: str = "bilinear", padded_diffraction_intensities_shape: Tuple[int, int] = None, region_of_interest_shape: Tuple[int, int] = None, dp_mask: np.ndarray = None, @@ -434,6 +434,7 @@ def preprocess( del _intensities self._num_diffraction_patterns = self._amplitudes.shape[0] + self._amplitudes_shape = np.array(self._amplitudes.shape[-2:]) if region_of_interest_shape is not None: self._resample_exit_waves = True diff --git 
a/py4DSTEM/process/phase/multislice_ptychography.py b/py4DSTEM/process/phase/multislice_ptychography.py index 1925ef372..4dd793dc0 100644 --- a/py4DSTEM/process/phase/multislice_ptychography.py +++ b/py4DSTEM/process/phase/multislice_ptychography.py @@ -232,7 +232,7 @@ def __init__( def preprocess( self, diffraction_intensities_shape: Tuple[int, int] = None, - reshaping_method: str = "fourier", + reshaping_method: str = "bilinear", padded_diffraction_intensities_shape: Tuple[int, int] = None, region_of_interest_shape: Tuple[int, int] = None, dp_mask: np.ndarray = None, @@ -462,6 +462,7 @@ def preprocess( del _intensities self._num_diffraction_patterns = self._amplitudes.shape[0] + self._amplitudes_shape = np.array(self._amplitudes.shape[-2:]) if region_of_interest_shape is not None: self._resample_exit_waves = True diff --git a/py4DSTEM/process/phase/phase_base_class.py b/py4DSTEM/process/phase/phase_base_class.py index 6b097ec82..2bc662933 100644 --- a/py4DSTEM/process/phase/phase_base_class.py +++ b/py4DSTEM/process/phase/phase_base_class.py @@ -2198,7 +2198,7 @@ def sampling(self): return tuple( electron_wavelength_angstrom(self._energy) * 1e3 / dk / n - for dk, n in zip(self.angular_sampling, self._region_of_interest_shape) + for dk, n in zip(self.angular_sampling, self._amplitudes_shape) ) @property diff --git a/py4DSTEM/process/phase/ptychographic_methods.py b/py4DSTEM/process/phase/ptychographic_methods.py index 84aff150e..cf3ee29bd 100644 --- a/py4DSTEM/process/phase/ptychographic_methods.py +++ b/py4DSTEM/process/phase/ptychographic_methods.py @@ -8,15 +8,20 @@ from py4DSTEM.process.phase.utils import ( AffineTransform, ComplexProbe, + bilinear_resample, copy_to_device, fft_shift, generate_batches, partition_list, rotate_point, spatial_frequencies, - vectorized_bilinear_resample, ) -from py4DSTEM.process.utils import electron_wavelength_angstrom, get_CoM, get_shifted_ar +from py4DSTEM.process.utils import ( + align_and_shift_images, + 
electron_wavelength_angstrom, + get_CoM, + get_shifted_ar, +) from py4DSTEM.visualize import return_scaled_histogram_ordering, show, show_complex from scipy.ndimage import gaussian_filter, rotate @@ -993,6 +998,17 @@ def _initialize_probe( vacuum_probe_intensity = xp.asarray( vacuum_probe_intensity, dtype=xp.float32 ) + + sx, sy = vacuum_probe_intensity.shape + tx, ty = region_of_interest_shape + if sx != tx or sy != ty: + vacuum_probe_intensity = bilinear_resample( + vacuum_probe_intensity, + output_size=(tx, ty), + vectorized=True, + xp=xp, + ) + probe_x0, probe_y0 = get_CoM( vacuum_probe_intensity, device=device, @@ -1502,29 +1518,36 @@ def _gradient_descent_fourier_projection(self, amplitudes, overlap, fourier_mask xp = self._xp - # resample to match data, note: this needs to happen in real-space + fourier_overlap = xp.fft.fft2(overlap) # * fourier_mask # GV votes to include + + # resample to match data, note: this needs to happen in reciprocal-space if self._resample_exit_waves: - overlap = vectorized_bilinear_resample( - overlap, output_size=amplitudes.shape[-2:], xp=xp + fourier_overlap = bilinear_resample( + fourier_overlap, + output_size=self._amplitudes_shape, + vectorized=True, + xp=xp, ) - fourier_overlap = xp.fft.fft2(overlap) # * fourier_mask # GV votes to include farfield_amplitudes = self._return_farfield_amplitudes(fourier_overlap) error = xp.sum(xp.abs(amplitudes - farfield_amplitudes) ** 2) - fourier_modified_overlap = amplitudes * xp.exp(1j * xp.angle(fourier_overlap)) fourier_modified_overlap = ( fourier_modified_overlap - fourier_overlap ) * fourier_mask - exit_waves = xp.fft.ifft2(fourier_modified_overlap) - # resample back to region_of_interest_shape, note: this needs to happen in real-space + # resample back to region_of_interest_shape, note: this needs to happen in reciprocal-space if self._resample_exit_waves: - exit_waves = vectorized_bilinear_resample( - exit_waves, output_size=self._region_of_interest_shape, xp=xp + 
fourier_modified_overlap = bilinear_resample( + fourier_modified_overlap, + output_size=self._region_of_interest_shape, + vectorized=True, + xp=xp, ) + exit_waves = xp.fft.ifft2(fourier_modified_overlap) + return exit_waves, error def _projection_sets_fourier_projection( @@ -1587,14 +1610,17 @@ def _projection_sets_fourier_projection( exit_waves = overlap.copy() factor_to_be_projected = projection_c * overlap + projection_y * exit_waves + fourier_projected_factor = xp.fft.fft2(factor_to_be_projected) - # resample to match data, note: this needs to happen in real-space + # resample to match data, note: this needs to happen in reciprocal-space if self._resample_exit_waves: - factor_to_be_projected = vectorized_bilinear_resample( - factor_to_be_projected, output_size=amplitudes.shape[-2:], xp=xp + fourier_projected_factor = bilinear_resample( + fourier_projected_factor, + output_size=self._amplitudes_shape, + vectorized=True, + xp=xp, ) - fourier_projected_factor = xp.fft.fft2(factor_to_be_projected) farfield_amplitudes = self._return_farfield_amplitudes(fourier_projected_factor) error = xp.sum(xp.abs(amplitudes - farfield_amplitudes) ** 2) @@ -1602,14 +1628,17 @@ def _projection_sets_fourier_projection( 1j * xp.angle(fourier_projected_factor) ) - projected_factor = xp.fft.ifft2(fourier_projected_factor) - - # resample back to region_of_interest_shape, note: this needs to happen in real-space + # resample back to region_of_interest_shape, note: this needs to happen in reciprocal-space if self._resample_exit_waves: - projected_factor = vectorized_bilinear_resample( - projected_factor, output_size=self._region_of_interest_shape, xp=xp + fourier_projected_factor = bilinear_resample( + fourier_projected_factor, + output_size=self._region_of_interest_shape, + vectorized=True, + xp=xp, ) + projected_factor = xp.fft.ifft2(fourier_projected_factor) + exit_waves = ( projection_x * exit_waves + projection_a * overlap @@ -1998,14 +2027,15 @@ def _position_correction( xp = 
self._xp storage = self._storage - # resample to match data, note: this needs to happen in real-space + overlap_fft = xp.fft.fft2(overlap) + + # resample to match data, note: this needs to happen in reciprocal-space if self._resample_exit_waves: - overlap = vectorized_bilinear_resample( - overlap, output_size=amplitudes.shape[-2:], xp=xp + overlap_fft = bilinear_resample( + overlap_fft, output_size=self._amplitudes_shape, vectorized=True, xp=xp ) # unperturbed - overlap_fft = xp.fft.fft2(overlap) overlap_fft_conj = xp.conj(overlap_fft) estimated_intensity = self._return_farfield_amplitudes(overlap_fft) ** 2 @@ -2033,18 +2063,27 @@ def _position_correction( shifted_probes, ) - # resample to match data, note: this needs to happen in real-space + overlap_dx_fft = xp.fft.fft2(overlap_dx) + overlap_dy_fft = xp.fft.fft2(overlap_dy) + + # resample to match data, note: this needs to happen in reciprocal-space if self._resample_exit_waves: - overlap_dx = vectorized_bilinear_resample( - overlap_dx, output_size=amplitudes.shape[-2:], xp=xp + overlap_dx_fft = bilinear_resample( + overlap_dx_fft, + output_size=self._amplitudes_shape, + vectorized=True, + xp=xp, ) - overlap_dy = vectorized_bilinear_resample( - overlap_dy, output_size=amplitudes.shape[-2:], xp=xp + overlap_dy_fft = bilinear_resample( + overlap_dy_fft, + output_size=self._amplitudes_shape, + vectorized=True, + xp=xp, ) # partial intensities - overlap_dx_fft = overlap_fft - xp.fft.fft2(overlap_dx) - overlap_dy_fft = overlap_fft - xp.fft.fft2(overlap_dy) + overlap_dx_fft = overlap_fft - overlap_dx_fft + overlap_dy_fft = overlap_fft - overlap_dy_fft partial_intensity_dx = 2 * xp.real(overlap_dx_fft * overlap_fft_conj) partial_intensity_dy = 2 * xp.real(overlap_dy_fft * overlap_fft_conj) @@ -2135,13 +2174,17 @@ def _return_self_consistency_errors( shifted_probes, ) - # resample to match data, note: this needs to happen in real-space + fourier_overlap = xp.fft.fft2(overlap) + + # resample to match data, note: this 
needs to happen in reciprocal-space if self._resample_exit_waves: - overlap = vectorized_bilinear_resample( - overlap, output_size=amplitudes_device.shape[-2:], xp=xp + fourier_overlap = bilinear_resample( + fourier_overlap, + output_size=self._amplitudes_shape, + vectorized=True, + xp=xp, ) - fourier_overlap = xp.fft.fft2(overlap) farfield_amplitudes = self._return_farfield_amplitudes(fourier_overlap) # Normalized mean-squared errors @@ -2627,13 +2670,17 @@ def _gradient_descent_fourier_projection(self, amplitudes, overlap, fourier_mask xp = self._xp - # resample to match data, note: this needs to happen in real-space + fourier_overlap = xp.fft.fft2(overlap) # * fourier_mask # GV votes to include + + # resample to match data, note: this needs to happen in reciprocal-space if self._resample_exit_waves: - overlap = vectorized_bilinear_resample( - overlap, output_size=amplitudes.shape[-2:], xp=xp + fourier_overlap = bilinear_resample( + fourier_overlap, + output_size=self._amplitudes_shape, + vectorized=True, + xp=xp, ) - fourier_overlap = xp.fft.fft2(overlap) # * fourier_mask # GV votes to include farfield_amplitudes = self._return_farfield_amplitudes(fourier_overlap) error = xp.sum(xp.abs(amplitudes - farfield_amplitudes) ** 2) @@ -2645,14 +2692,18 @@ def _gradient_descent_fourier_projection(self, amplitudes, overlap, fourier_mask fourier_modified_overlap = ( fourier_modified_overlap - fourier_overlap ) * fourier_mask - exit_waves = xp.fft.ifft2(fourier_modified_overlap) - # resample back to region_of_interest_shape, note: this needs to happen in real-space + # resample back to region_of_interest_shape, note: this needs to happen in reciprocal-space if self._resample_exit_waves: - exit_waves = vectorized_bilinear_resample( - exit_waves, output_size=self._region_of_interest_shape, xp=xp + fourier_modified_overlap = bilinear_resample( + fourier_modified_overlap, + output_size=self._region_of_interest_shape, + vectorized=True, + xp=xp, ) + exit_waves = 
xp.fft.ifft2(fourier_modified_overlap) + return exit_waves, error def _projection_sets_fourier_projection( @@ -2715,14 +2766,17 @@ def _projection_sets_fourier_projection( exit_waves = overlap.copy() factor_to_be_projected = projection_c * overlap + projection_y * exit_waves + fourier_projected_factor = xp.fft.fft2(factor_to_be_projected) - # resample to match data, note: this needs to happen in real-space + # resample to match data, note: this needs to happen in reciprocal-space if self._resample_exit_waves: - factor_to_be_projected = vectorized_bilinear_resample( - factor_to_be_projected, output_size=amplitudes.shape[-2:], xp=xp + fourier_projected_factor = bilinear_resample( + fourier_projected_factor, + output_size=self._amplitudes_shape, + vectorized=True, + xp=xp, ) - fourier_projected_factor = xp.fft.fft2(factor_to_be_projected) farfield_amplitudes = self._return_farfield_amplitudes(fourier_projected_factor) error = xp.sum(xp.abs(amplitudes - farfield_amplitudes) ** 2) @@ -2730,14 +2784,18 @@ def _projection_sets_fourier_projection( amplitude_modification = amplitudes / farfield_amplitudes fourier_projected_factor *= amplitude_modification[:, None] - projected_factor = xp.fft.ifft2(fourier_projected_factor) # resample back to region_of_interest_shape, note: this needs to happen in real-space if self._resample_exit_waves: - projected_factor = vectorized_bilinear_resample( - projected_factor, output_size=self._region_of_interest_shape, xp=xp + fourier_projected_factor = bilinear_resample( + fourier_projected_factor, + output_size=self._region_of_interest_shape, + vectorized=True, + xp=xp, ) + projected_factor = xp.fft.ifft2(fourier_projected_factor) + exit_waves = ( projection_x * exit_waves + projection_a * overlap diff --git a/py4DSTEM/process/phase/ptychographic_tomography.py b/py4DSTEM/process/phase/ptychographic_tomography.py index ff081d85e..7471d8bb4 100644 --- a/py4DSTEM/process/phase/ptychographic_tomography.py +++ 
b/py4DSTEM/process/phase/ptychographic_tomography.py @@ -215,7 +215,7 @@ def __init__( def preprocess( self, diffraction_intensities_shape: Tuple[int, int] = None, - reshaping_method: str = "fourier", + reshaping_method: str = "bilinear", padded_diffraction_intensities_shape: Tuple[int, int] = None, region_of_interest_shape: Tuple[int, int] = None, dp_mask: np.ndarray = None, @@ -372,6 +372,7 @@ def preprocess( (self._num_diffraction_patterns,) + roi_shape ) + self._amplitudes_shape = np.array(self._amplitudes.shape[-2:]) if region_of_interest_shape is not None: self._resample_exit_waves = True self._region_of_interest_shape = np.array(region_of_interest_shape) diff --git a/py4DSTEM/process/phase/singleslice_ptychography.py b/py4DSTEM/process/phase/singleslice_ptychography.py index 857c8e679..60fb7ee97 100644 --- a/py4DSTEM/process/phase/singleslice_ptychography.py +++ b/py4DSTEM/process/phase/singleslice_ptychography.py @@ -186,7 +186,7 @@ def __init__( def preprocess( self, diffraction_intensities_shape: Tuple[int, int] = None, - reshaping_method: str = "fourier", + reshaping_method: str = "bilinear", padded_diffraction_intensities_shape: Tuple[int, int] = None, region_of_interest_shape: Tuple[int, int] = None, dp_mask: np.ndarray = None, @@ -407,6 +407,7 @@ def preprocess( del _intensities self._num_diffraction_patterns = self._amplitudes.shape[0] + self._amplitudes_shape = np.array(self._amplitudes.shape[-2:]) if region_of_interest_shape is not None: self._resample_exit_waves = True diff --git a/py4DSTEM/process/phase/utils.py b/py4DSTEM/process/phase/utils.py index 98d8fdd49..022ba369c 100644 --- a/py4DSTEM/process/phase/utils.py +++ b/py4DSTEM/process/phase/utils.py @@ -2244,17 +2244,18 @@ def lanczos_kernel_density_estimate( return pix_output -def vectorized_bilinear_resample( +def bilinear_resample( array, scale=None, output_size=None, mode="grid-wrap", grid_mode=True, + vectorized=True, xp=np, ): """ Resize an array along its final two axes. 
- Note, this is vectorized and thus very memory-intensive. + Note, this is vectorized by default and thus very memory-intensive. The scaling of the array can be specified by passing either `scale`, which sets the scaling factor along both axes to be scaled; or by passing `output_size`, @@ -2298,9 +2299,27 @@ def vectorized_bilinear_resample( scale_output = (1,) * (array_size.size - input_size.size) + scale_output if xp is np: - array = zoom(array, scale_output, order=1, mode=mode, grid_mode=grid_mode) + zoom_xp = zoom + else: + zoom_xp = zoom_cp + + if vectorized: + array = zoom_xp(array, scale_output, order=1, mode=mode, grid_mode=grid_mode) else: - array = zoom_cp(array, scale_output, order=1, mode=mode, grid_mode=grid_mode) + flat_array = array.reshape((-1,) + tuple(input_size)) + out_array = xp.zeros( + (flat_array.shape[0],) + tuple(output_size), flat_array.dtype + ) + for idx in range(flat_array.shape[0]): + out_array[idx] = zoom_xp( + flat_array[idx], + scale_output[-2:], + order=1, + mode=mode, + grid_mode=grid_mode, + ) + + array = out_array.reshape(tuple(array_size[:-2]) + tuple(output_size)) return array From b772a8c17d01285da7107adc6d06fe1b44bbb72f Mon Sep 17 00:00:00 2001 From: Georgios Varnavides Date: Sat, 9 Mar 2024 11:40:54 -0800 Subject: [PATCH 32/50] adding cross-correlation function --- .../process/phase/ptychographic_methods.py | 42 +++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/py4DSTEM/process/phase/ptychographic_methods.py b/py4DSTEM/process/phase/ptychographic_methods.py index cf3ee29bd..3a31b0633 100644 --- a/py4DSTEM/process/phase/ptychographic_methods.py +++ b/py4DSTEM/process/phase/ptychographic_methods.py @@ -3,6 +3,7 @@ import matplotlib.pyplot as plt import numpy as np +from emdfile import tqdmnd from matplotlib.gridspec import GridSpec from mpl_toolkits.axes_grid1 import make_axes_locatable from py4DSTEM.process.phase.utils import ( @@ -1494,6 +1495,47 @@ def _return_farfield_amplitudes(self, fourier_overlap): xp 
= self._xp return xp.abs(fourier_overlap) + def cross_correlate_amplitudes_to_probe_aperture( + self, upsample_factor=4, progress_bar=True, probe=None + ): + """ + Cross-correlates the measured amplitudes with the current probe aperture. + Modifies self._amplitudes in-place. + + Parameters + ---------- + upsample_factor: float + Upsampling factor used in cross-correlation. Must be larger than 2 + probe: np.ndarray, optional + Probe to use for centering. Passed to _return_single_probe(probe) + + Returns + ------- + self to accommodate chaining + """ + xp = self._xp + storage = self._storage + + num_dps = self._num_diffraction_patterns + + single_probe = self._return_single_probe(probe) + probe_aperture = copy_to_device(xp.abs(xp.fft.fft2(single_probe)), storage) + + for idx in tqdmnd( + num_dps, + desc="Cross-correlating amplitudes", + unit="DP", + disable=not progress_bar, + ): + self._amplitudes[idx] = align_and_shift_images( + probe_aperture, + self._amplitudes[idx], + upsample_factor=upsample_factor, + device=storage, + ) + + return self + def _gradient_descent_fourier_projection(self, amplitudes, overlap, fourier_mask): """ Ptychographic fourier projection method for GD method. 
From 2d474cd3f40142a53874ab4f16395058e77be8c0 Mon Sep 17 00:00:00 2001 From: Georgios Varnavides Date: Tue, 12 Mar 2024 16:06:28 -0700 Subject: [PATCH 33/50] more careful array sums interpolation --- py4DSTEM/datacube/datacube.py | 10 +++++++-- py4DSTEM/preprocess/preprocess.py | 14 +++++++++++-- py4DSTEM/process/phase/phase_base_class.py | 21 ++++++++++++++++--- .../process/phase/ptychographic_methods.py | 18 +++++++++++++++- py4DSTEM/process/phase/utils.py | 8 ++++++- py4DSTEM/process/utils/utils.py | 5 ++++- 6 files changed, 66 insertions(+), 10 deletions(-) diff --git a/py4DSTEM/datacube/datacube.py b/py4DSTEM/datacube/datacube.py index 77c125721..30e6a752e 100644 --- a/py4DSTEM/datacube/datacube.py +++ b/py4DSTEM/datacube/datacube.py @@ -405,7 +405,9 @@ def pad_Q(self, N=None, output_size=None): d = pad_data_diffraction(self, pad_factor=N, output_size=output_size) return d - def resample_Q(self, N=None, output_size=None, method="bilinear"): + def resample_Q( + self, N=None, output_size=None, method="bilinear", conserve_array_sums=False + ): """ Resamples the data in diffraction space by resampling factor N, or to match output_size, using either 'fourier' or 'bilinear' interpolation. 
@@ -418,7 +420,11 @@ def resample_Q(self, N=None, output_size=None, method="bilinear"): from py4DSTEM.preprocess import resample_data_diffraction d = resample_data_diffraction( - self, resampling_factor=N, output_size=output_size, method=method + self, + resampling_factor=N, + output_size=output_size, + method=method, + conserve_array_sums=conserve_array_sums, ) return d diff --git a/py4DSTEM/preprocess/preprocess.py b/py4DSTEM/preprocess/preprocess.py index 8e2b5e70a..aa7365765 100644 --- a/py4DSTEM/preprocess/preprocess.py +++ b/py4DSTEM/preprocess/preprocess.py @@ -573,7 +573,11 @@ def datacube_diffraction_shift( def resample_data_diffraction( - datacube, resampling_factor=None, output_size=None, method="bilinear" + datacube, + resampling_factor=None, + output_size=None, + method="bilinear", + conserve_array_sums=False, ): """ Performs diffraction space resampling of data by resampling_factor or to match output_size. @@ -594,7 +598,10 @@ def resample_data_diffraction( old_size = datacube.data.shape datacube.data = fourier_resample( - datacube.data, scale=resampling_factor, output_size=output_size + datacube.data, + scale=resampling_factor, + output_size=output_size, + conserve_array_sums=conserve_array_sums, ) if not resampling_factor: @@ -650,6 +657,9 @@ def resample_data_diffraction( grid_mode=True, ) + if conserve_array_sums: + output_data = output_data / resampling_factor.prod() + datacube.data = output_data datacube.calibration.set_Q_pixel_size( datacube.calibration.get_Q_pixel_size() / resampling_factor[0] diff --git a/py4DSTEM/process/phase/phase_base_class.py b/py4DSTEM/process/phase/phase_base_class.py index 2bc662933..36e0c598a 100644 --- a/py4DSTEM/process/phase/phase_base_class.py +++ b/py4DSTEM/process/phase/phase_base_class.py @@ -346,34 +346,49 @@ def _preprocess_datacube_and_vacuum_probe( elif reshaping_method == "fourier": datacube = datacube.resample_Q( - N=resampling_factor_x, method=reshaping_method + N=resampling_factor_x, + 
method=reshaping_method, + conserve_array_sums=True, ) if vacuum_probe_intensity is not None: vacuum_probe_intensity = fourier_resample( vacuum_probe_intensity, output_size=diffraction_intensities_shape, force_nonnegative=True, + conserve_array_sums=True, ) if dp_mask is not None: dp_mask = fourier_resample( dp_mask, output_size=diffraction_intensities_shape, force_nonnegative=True, + conserve_array_sums=False, ) elif reshaping_method == "bilinear": datacube = datacube.resample_Q( - N=resampling_factor_x, method=reshaping_method + N=resampling_factor_x, + method=reshaping_method, + conserve_array_sums=True, ) if vacuum_probe_intensity is not None: vacuum_probe_intensity = zoom( vacuum_probe_intensity, (resampling_factor_x, resampling_factor_x), order=1, + mode="grid-wrap", + grid_mode=True, + ) + vacuum_probe_intensity = ( + vacuum_probe_intensity / resampling_factor_x**2 ) if dp_mask is not None: dp_mask = zoom( - dp_mask, (resampling_factor_x, resampling_factor_x), order=1 + dp_mask, + (resampling_factor_x, resampling_factor_x), + order=1, + mode="grid-wrap", + grid_mode=True, ) else: diff --git a/py4DSTEM/process/phase/ptychographic_methods.py b/py4DSTEM/process/phase/ptychographic_methods.py index 3a31b0633..40d03b6b8 100644 --- a/py4DSTEM/process/phase/ptychographic_methods.py +++ b/py4DSTEM/process/phase/ptychographic_methods.py @@ -1007,6 +1007,7 @@ def _initialize_probe( vacuum_probe_intensity, output_size=(tx, ty), vectorized=True, + conserve_array_sums=True, xp=xp, ) @@ -1568,6 +1569,7 @@ def _gradient_descent_fourier_projection(self, amplitudes, overlap, fourier_mask fourier_overlap, output_size=self._amplitudes_shape, vectorized=True, + conserve_array_sums=True, xp=xp, ) @@ -1585,6 +1587,7 @@ def _gradient_descent_fourier_projection(self, amplitudes, overlap, fourier_mask fourier_modified_overlap, output_size=self._region_of_interest_shape, vectorized=True, + conserve_array_sums=True, xp=xp, ) @@ -1660,6 +1663,7 @@ def 
_projection_sets_fourier_projection( fourier_projected_factor, output_size=self._amplitudes_shape, vectorized=True, + conserve_array_sums=True, xp=xp, ) @@ -1676,6 +1680,7 @@ def _projection_sets_fourier_projection( fourier_projected_factor, output_size=self._region_of_interest_shape, vectorized=True, + conserve_array_sums=True, xp=xp, ) @@ -2074,7 +2079,11 @@ def _position_correction( # resample to match data, note: this needs to happen in reciprocal-space if self._resample_exit_waves: overlap_fft = bilinear_resample( - overlap_fft, output_size=self._amplitudes_shape, vectorized=True, xp=xp + overlap_fft, + output_size=self._amplitudes_shape, + vectorized=True, + conserve_array_sums=True, + xp=xp, ) # unperturbed @@ -2114,12 +2123,14 @@ def _position_correction( overlap_dx_fft, output_size=self._amplitudes_shape, vectorized=True, + conserve_array_sums=True, xp=xp, ) overlap_dy_fft = bilinear_resample( overlap_dy_fft, output_size=self._amplitudes_shape, vectorized=True, + conserve_array_sums=True, xp=xp, ) @@ -2224,6 +2235,7 @@ def _return_self_consistency_errors( fourier_overlap, output_size=self._amplitudes_shape, vectorized=True, + conserve_array_sums=True, xp=xp, ) @@ -2720,6 +2732,7 @@ def _gradient_descent_fourier_projection(self, amplitudes, overlap, fourier_mask fourier_overlap, output_size=self._amplitudes_shape, vectorized=True, + conserve_array_sums=True, xp=xp, ) @@ -2741,6 +2754,7 @@ def _gradient_descent_fourier_projection(self, amplitudes, overlap, fourier_mask fourier_modified_overlap, output_size=self._region_of_interest_shape, vectorized=True, + conserve_array_sums=True, xp=xp, ) @@ -2816,6 +2830,7 @@ def _projection_sets_fourier_projection( fourier_projected_factor, output_size=self._amplitudes_shape, vectorized=True, + conserve_array_sums=True, xp=xp, ) @@ -2833,6 +2848,7 @@ def _projection_sets_fourier_projection( fourier_projected_factor, output_size=self._region_of_interest_shape, vectorized=True, + conserve_array_sums=True, xp=xp, ) diff 
--git a/py4DSTEM/process/phase/utils.py b/py4DSTEM/process/phase/utils.py index 022ba369c..a5a541795 100644 --- a/py4DSTEM/process/phase/utils.py +++ b/py4DSTEM/process/phase/utils.py @@ -2251,6 +2251,7 @@ def bilinear_resample( mode="grid-wrap", grid_mode=True, vectorized=True, + conserve_array_sums=False, xp=np, ): """ @@ -2321,6 +2322,9 @@ def bilinear_resample( array = out_array.reshape(tuple(array_size[:-2]) + tuple(output_size)) + if conserve_array_sums: + array = array / np.array(scale_output).prod() + return array @@ -2328,6 +2332,7 @@ def vectorized_fourier_resample( array, scale=None, output_size=None, + conserve_array_sums=False, xp=np, ): """ @@ -2489,7 +2494,8 @@ def vectorized_fourier_resample( array_resize = xp.real(xp.fft.ifft2(array_resize)).astype(xp.float32) # Normalization - array_resize *= scale_output + if not conserve_array_sums: + array_resize = array_resize * scale_output return array_resize diff --git a/py4DSTEM/process/utils/utils.py b/py4DSTEM/process/utils/utils.py index aa49382b5..6182faeff 100644 --- a/py4DSTEM/process/utils/utils.py +++ b/py4DSTEM/process/utils/utils.py @@ -446,6 +446,7 @@ def fourier_resample( bandlimit_nyquist=None, bandlimit_power=2, dtype=np.float32, + conserve_array_sums=False, ): """ Resize a 2D array along any dimension, using Fourier interpolation / extrapolation. @@ -464,6 +465,7 @@ def fourier_resample( bandlimit_nyquist (float): Gaussian filter information limit in Nyquist units (0.5 max in both directions) bandlimit_power (float): Gaussian filter power law scaling (higher is sharper) dtype (numpy dtype): datatype for binned array. 
default is single precision float + conserve_array_sums (bool): If True, the sums of the array are conserved Returns: the resized array (2D/4D numpy array) @@ -661,7 +663,8 @@ def fourier_resample( array_resize = np.maximum(array_resize, 0) # Normalization - array_resize = array_resize * scale_output + if not conserve_array_sums: + array_resize = array_resize * scale_output return array_resize From 0ac074dff5338b43382d88b4c1b0aec56861f5cc Mon Sep 17 00:00:00 2001 From: Georgios Varnavides Date: Wed, 13 Mar 2024 06:04:07 -0700 Subject: [PATCH 34/50] Bump gdown to 5.1.0 (#617) --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 5c662dcf0..bdb2c48ff 100644 --- a/setup.py +++ b/setup.py @@ -34,7 +34,7 @@ "scikit-optimize >= 0.9.0", "tqdm >= 4.46.1", "dill >= 0.3.3", - "gdown >= 4.7.1", + "gdown >= 5.1.0", "dask >= 2.3.0", "distributed >= 2.3.0", "emdfile >= 0.0.14", From 1afa166259e0367a68f7e522d3c11126a933b9d6 Mon Sep 17 00:00:00 2001 From: Georgios Varnavides Date: Wed, 13 Mar 2024 15:43:31 -0700 Subject: [PATCH 35/50] docstring change per Colin review --- py4DSTEM/process/phase/magnetic_ptychographic_tomography.py | 2 +- py4DSTEM/process/phase/magnetic_ptychography.py | 2 +- py4DSTEM/process/phase/mixedstate_multislice_ptychography.py | 2 +- py4DSTEM/process/phase/mixedstate_ptychography.py | 2 +- py4DSTEM/process/phase/multislice_ptychography.py | 2 +- py4DSTEM/process/phase/singleslice_ptychography.py | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/py4DSTEM/process/phase/magnetic_ptychographic_tomography.py b/py4DSTEM/process/phase/magnetic_ptychographic_tomography.py index 5b2239e23..7f9eeda87 100644 --- a/py4DSTEM/process/phase/magnetic_ptychographic_tomography.py +++ b/py4DSTEM/process/phase/magnetic_ptychographic_tomography.py @@ -966,7 +966,7 @@ def reconstruct( fix_potential_baseline: bool If true, the potential mean outside the FOV is forced to zero at each iteration 
detector_fourier_mask: np.ndarray - Corner-centered mask to apply at the detector-plane for zeroing-out unreliable gradients. + Corner-centered mask to multiply the detector-plane gradients with (a value of zero suppresses those pixels). Useful when detector has artifacts such as dead-pixels. Usually binary. store_iterations: bool, optional If True, reconstructed objects and probes are stored at each iteration diff --git a/py4DSTEM/process/phase/magnetic_ptychography.py b/py4DSTEM/process/phase/magnetic_ptychography.py index 31b5b6fb5..5da08e5c3 100644 --- a/py4DSTEM/process/phase/magnetic_ptychography.py +++ b/py4DSTEM/process/phase/magnetic_ptychography.py @@ -1285,7 +1285,7 @@ def reconstruct( fix_potential_baseline: bool If true, the potential mean outside the FOV is forced to zero at each iteration detector_fourier_mask: np.ndarray - Corner-centered mask to apply at the detector-plane for zeroing-out unreliable gradients. + Corner-centered mask to multiply the detector-plane gradients with (a value of zero suppresses those pixels). Useful when detector has artifacts such as dead-pixels. Usually binary. store_iterations: bool, optional If True, reconstructed objects and probes are stored at each iteration diff --git a/py4DSTEM/process/phase/mixedstate_multislice_ptychography.py b/py4DSTEM/process/phase/mixedstate_multislice_ptychography.py index 357653a83..19e6b617c 100644 --- a/py4DSTEM/process/phase/mixedstate_multislice_ptychography.py +++ b/py4DSTEM/process/phase/mixedstate_multislice_ptychography.py @@ -853,7 +853,7 @@ def reconstruct( fix_potential_baseline: bool If true, the potential mean outside the FOV is forced to zero at each iteration detector_fourier_mask: np.ndarray - Corner-centered mask to apply at the detector-plane for zeroing-out unreliable gradients. + Corner-centered mask to multiply the detector-plane gradients with (a value of zero suppresses those pixels). Useful when detector has artifacts such as dead-pixels. Usually binary. 
pure_phase_object: bool, optional If True, object amplitude is set to unity diff --git a/py4DSTEM/process/phase/mixedstate_ptychography.py b/py4DSTEM/process/phase/mixedstate_ptychography.py index 386c6b4d7..c57802ed8 100644 --- a/py4DSTEM/process/phase/mixedstate_ptychography.py +++ b/py4DSTEM/process/phase/mixedstate_ptychography.py @@ -759,7 +759,7 @@ def reconstruct( fix_potential_baseline: bool If true, the potential mean outside the FOV is forced to zero at each iteration detector_fourier_mask: np.ndarray - Corner-centered mask to apply at the detector-plane for zeroing-out unreliable gradients. + Corner-centered mask to multiply the detector-plane gradients with (a value of zero suppresses those pixels). Useful when detector has artifacts such as dead-pixels. Usually binary. store_iterations: bool, optional If True, reconstructed objects and probes are stored at each iteration diff --git a/py4DSTEM/process/phase/multislice_ptychography.py b/py4DSTEM/process/phase/multislice_ptychography.py index 4dd793dc0..943596428 100644 --- a/py4DSTEM/process/phase/multislice_ptychography.py +++ b/py4DSTEM/process/phase/multislice_ptychography.py @@ -825,7 +825,7 @@ def reconstruct( fix_potential_baseline: bool If true, the potential mean outside the FOV is forced to zero at each iteration detector_fourier_mask: np.ndarray - Corner-centered mask to apply at the detector-plane for zeroing-out unreliable gradients. + Corner-centered mask to multiply the detector-plane gradients with (a value of zero suppresses those pixels). Useful when detector has artifacts such as dead-pixels. Usually binary. 
pure_phase_object: bool, optional If True, object amplitude is set to unity diff --git a/py4DSTEM/process/phase/singleslice_ptychography.py b/py4DSTEM/process/phase/singleslice_ptychography.py index 60fb7ee97..d4568f11c 100644 --- a/py4DSTEM/process/phase/singleslice_ptychography.py +++ b/py4DSTEM/process/phase/singleslice_ptychography.py @@ -729,7 +729,7 @@ def reconstruct( fix_potential_baseline: bool If true, the potential mean outside the FOV is forced to zero at each iteration detector_fourier_mask: np.ndarray - Corner-centered mask to apply at the detector-plane for zeroing-out unreliable gradients. + Corner-centered mask to multiply the detector-plane gradients with (a value of zero suppresses those pixels). Useful when detector has artifacts such as dead-pixels. Usually binary. store_iterations: bool, optional If True, reconstructed objects and probes are stored at each iteration From 6806055279589748e30cead0ef16a6e52cbeb53e Mon Sep 17 00:00:00 2001 From: Georgios Varnavides Date: Thu, 14 Mar 2024 14:24:13 -0700 Subject: [PATCH 36/50] DP resampling mode to nearest --- py4DSTEM/preprocess/preprocess.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/py4DSTEM/preprocess/preprocess.py b/py4DSTEM/preprocess/preprocess.py index aa7365765..f18d9b621 100644 --- a/py4DSTEM/preprocess/preprocess.py +++ b/py4DSTEM/preprocess/preprocess.py @@ -653,7 +653,7 @@ def resample_data_diffraction( datacube.data[Rx, Ry].astype(np.float32), resampling_factor, order=1, - mode="grid-wrap", + mode="nearest", grid_mode=True, ) From 1137191b8813a410519b4e19cb585971999f7182 Mon Sep 17 00:00:00 2001 From: Georgios Varnavides Date: Thu, 14 Mar 2024 14:30:52 -0700 Subject: [PATCH 37/50] adding fourier_mask in forward mode, ie pre error calculation --- py4DSTEM/process/phase/ptychographic_methods.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/py4DSTEM/process/phase/ptychographic_methods.py b/py4DSTEM/process/phase/ptychographic_methods.py 
index 40d03b6b8..fa0b1db9f 100644 --- a/py4DSTEM/process/phase/ptychographic_methods.py +++ b/py4DSTEM/process/phase/ptychographic_methods.py @@ -1561,7 +1561,7 @@ def _gradient_descent_fourier_projection(self, amplitudes, overlap, fourier_mask xp = self._xp - fourier_overlap = xp.fft.fft2(overlap) # * fourier_mask # GV votes to include + fourier_overlap = xp.fft.fft2(overlap) # resample to match data, note: this needs to happen in reciprocal-space if self._resample_exit_waves: @@ -1573,6 +1573,7 @@ def _gradient_descent_fourier_projection(self, amplitudes, overlap, fourier_mask xp=xp, ) + fourier_overlap *= fourier_mask farfield_amplitudes = self._return_farfield_amplitudes(fourier_overlap) error = xp.sum(xp.abs(amplitudes - farfield_amplitudes) ** 2) fourier_modified_overlap = amplitudes * xp.exp(1j * xp.angle(fourier_overlap)) @@ -2724,7 +2725,7 @@ def _gradient_descent_fourier_projection(self, amplitudes, overlap, fourier_mask xp = self._xp - fourier_overlap = xp.fft.fft2(overlap) # * fourier_mask # GV votes to include + fourier_overlap = xp.fft.fft2(overlap) # resample to match data, note: this needs to happen in reciprocal-space if self._resample_exit_waves: @@ -2736,6 +2737,7 @@ def _gradient_descent_fourier_projection(self, amplitudes, overlap, fourier_mask xp=xp, ) + fourier_overlap *= fourier_mask farfield_amplitudes = self._return_farfield_amplitudes(fourier_overlap) error = xp.sum(xp.abs(amplitudes - farfield_amplitudes) ** 2) From facf1e12de6601fb08009680bd6c9bc82b452906 Mon Sep 17 00:00:00 2001 From: Georgios Varnavides Date: Fri, 15 Mar 2024 11:03:55 -0700 Subject: [PATCH 38/50] small bugfix, moving reset before object_type change --- .../phase/magnetic_ptychographic_tomography.py | 6 +++--- py4DSTEM/process/phase/magnetic_ptychography.py | 12 ++++++------ .../phase/mixedstate_multislice_ptychography.py | 6 +++--- py4DSTEM/process/phase/mixedstate_ptychography.py | 6 +++--- py4DSTEM/process/phase/multislice_ptychography.py | 6 +++--- 
py4DSTEM/process/phase/ptychographic_tomography.py | 6 +++--- py4DSTEM/process/phase/singleslice_ptychography.py | 6 +++--- 7 files changed, 24 insertions(+), 24 deletions(-) diff --git a/py4DSTEM/process/phase/magnetic_ptychographic_tomography.py b/py4DSTEM/process/phase/magnetic_ptychographic_tomography.py index 7f9eeda87..c9efae806 100644 --- a/py4DSTEM/process/phase/magnetic_ptychographic_tomography.py +++ b/py4DSTEM/process/phase/magnetic_ptychographic_tomography.py @@ -1032,6 +1032,9 @@ def reconstruct( "Magnetic ptychographic tomography is currently only implemented for gradient descent." ) + # initialization + self._reset_reconstruction(store_iterations, reset, use_projection_scheme) + if self._verbose: self._report_reconstruction_summary( num_iter, @@ -1056,9 +1059,6 @@ def reconstruct( else: detector_fourier_mask = xp.asarray(detector_fourier_mask) - # initialization - self._reset_reconstruction(store_iterations, reset, use_projection_scheme) - if gaussian_filter_sigma_m is None: gaussian_filter_sigma_m = gaussian_filter_sigma_e diff --git a/py4DSTEM/process/phase/magnetic_ptychography.py b/py4DSTEM/process/phase/magnetic_ptychography.py index 5da08e5c3..2e887739f 100644 --- a/py4DSTEM/process/phase/magnetic_ptychography.py +++ b/py4DSTEM/process/phase/magnetic_ptychography.py @@ -1321,9 +1321,6 @@ def reconstruct( ] self.copy_attributes_to_device(attrs, device) - if object_type is not None: - self._switch_object_type(object_type) - xp = self._xp xp_storage = self._xp_storage device = self._device @@ -1357,6 +1354,12 @@ def reconstruct( "Magnetic ptychography is currently only implemented for gradient descent." 
) + # initialization + self._reset_reconstruction(store_iterations, reset, use_projection_scheme) + + if object_type is not None: + self._switch_object_type(object_type) + if self._verbose: self._report_reconstruction_summary( num_iter, @@ -1381,9 +1384,6 @@ def reconstruct( else: detector_fourier_mask = xp.asarray(detector_fourier_mask) - # initialization - self._reset_reconstruction(store_iterations, reset, use_projection_scheme) - if gaussian_filter_sigma_m is None: gaussian_filter_sigma_m = gaussian_filter_sigma_e diff --git a/py4DSTEM/process/phase/mixedstate_multislice_ptychography.py b/py4DSTEM/process/phase/mixedstate_multislice_ptychography.py index 19e6b617c..119fc3a3c 100644 --- a/py4DSTEM/process/phase/mixedstate_multislice_ptychography.py +++ b/py4DSTEM/process/phase/mixedstate_multislice_ptychography.py @@ -899,6 +899,9 @@ def reconstruct( ] self.copy_attributes_to_device(attrs, device) + # initialization + self._reset_reconstruction(store_iterations, reset) + if object_type is not None: self._switch_object_type(object_type) @@ -951,9 +954,6 @@ def reconstruct( else: detector_fourier_mask = xp.asarray(detector_fourier_mask) - # initialization - self._reset_reconstruction(store_iterations, reset) - # main loop for a0 in tqdmnd( num_iter, diff --git a/py4DSTEM/process/phase/mixedstate_ptychography.py b/py4DSTEM/process/phase/mixedstate_ptychography.py index c57802ed8..bd650a931 100644 --- a/py4DSTEM/process/phase/mixedstate_ptychography.py +++ b/py4DSTEM/process/phase/mixedstate_ptychography.py @@ -793,6 +793,9 @@ def reconstruct( ] self.copy_attributes_to_device(attrs, device) + # initialization + self._reset_reconstruction(store_iterations, reset) + if object_type is not None: self._switch_object_type(object_type) @@ -845,9 +848,6 @@ def reconstruct( else: detector_fourier_mask = xp.asarray(detector_fourier_mask) - # initialization - self._reset_reconstruction(store_iterations, reset) - # main loop for a0 in tqdmnd( num_iter, diff --git 
a/py4DSTEM/process/phase/multislice_ptychography.py b/py4DSTEM/process/phase/multislice_ptychography.py index 943596428..db17cb1a8 100644 --- a/py4DSTEM/process/phase/multislice_ptychography.py +++ b/py4DSTEM/process/phase/multislice_ptychography.py @@ -875,6 +875,9 @@ def reconstruct( ] self.copy_attributes_to_device(attrs, device) + # initialization + self._reset_reconstruction(store_iterations, reset) + if object_type is not None: self._switch_object_type(object_type) @@ -927,9 +930,6 @@ def reconstruct( else: detector_fourier_mask = xp.asarray(detector_fourier_mask) - # initialization - self._reset_reconstruction(store_iterations, reset) - # main loop for a0 in tqdmnd( num_iter, diff --git a/py4DSTEM/process/phase/ptychographic_tomography.py b/py4DSTEM/process/phase/ptychographic_tomography.py index 7471d8bb4..f3b2991ab 100644 --- a/py4DSTEM/process/phase/ptychographic_tomography.py +++ b/py4DSTEM/process/phase/ptychographic_tomography.py @@ -939,6 +939,9 @@ def reconstruct( step_size, ) + # initialization + self._reset_reconstruction(store_iterations, reset, use_projection_scheme) + if self._verbose: self._report_reconstruction_summary( num_iter, @@ -963,9 +966,6 @@ def reconstruct( else: detector_fourier_mask = xp.asarray(detector_fourier_mask) - # initialization - self._reset_reconstruction(store_iterations, reset, use_projection_scheme) - # main loop for a0 in tqdmnd( num_iter, diff --git a/py4DSTEM/process/phase/singleslice_ptychography.py b/py4DSTEM/process/phase/singleslice_ptychography.py index d4568f11c..e1f14a90f 100644 --- a/py4DSTEM/process/phase/singleslice_ptychography.py +++ b/py4DSTEM/process/phase/singleslice_ptychography.py @@ -763,6 +763,9 @@ def reconstruct( ] self.copy_attributes_to_device(attrs, device) + # initialization + self._reset_reconstruction(store_iterations, reset) + if object_type is not None: self._switch_object_type(object_type) @@ -815,9 +818,6 @@ def reconstruct( else: detector_fourier_mask = 
xp.asarray(detector_fourier_mask) - # initialization - self._reset_reconstruction(store_iterations, reset) - # main loop for a0 in tqdmnd( num_iter, From 2d4928ee58a85f475b375fb8d6a4b03574b464c7 Mon Sep 17 00:00:00 2001 From: Georgios Varnavides Date: Mon, 18 Mar 2024 09:23:44 -0700 Subject: [PATCH 39/50] parallax odd pixel padding bugfix --- py4DSTEM/process/phase/parallax.py | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/py4DSTEM/process/phase/parallax.py b/py4DSTEM/process/phase/parallax.py index 46ca31737..c8c51054a 100644 --- a/py4DSTEM/process/phase/parallax.py +++ b/py4DSTEM/process/phase/parallax.py @@ -2750,19 +2750,33 @@ def _crop_padded_object( if upsampled: pad_x = np.round( + self._object_padding_px[0] * self._kde_upsample_factor + ).astype("int") + pad_x_left = np.round( self._object_padding_px[0] / 2 * self._kde_upsample_factor ).astype("int") + pad_x_right = pad_x - pad_x_left + pad_y = np.round( + self._object_padding_px[1] * self._kde_upsample_factor + ).astype("int") + pad_y_left = np.round( self._object_padding_px[1] / 2 * self._kde_upsample_factor ).astype("int") + pad_y_right = pad_y - pad_y_left + else: - pad_x = self._object_padding_px[0] // 2 - pad_y = self._object_padding_px[1] // 2 + pad_x_left = self._object_padding_px[0] // 2 + pad_x_right = self._object_padding_px[0] - pad_x_left + pad_y_left = self._object_padding_px[1] // 2 + pad_y_right = self._object_padding_px[1] - pad_y_left - pad_x -= remaining_padding - pad_y -= remaining_padding + pad_x_left -= remaining_padding + pad_x_right -= remaining_padding + pad_y_left -= remaining_padding + pad_y_right -= remaining_padding - return asnumpy(padded_object[pad_x:-pad_x, pad_y:-pad_y]) + return asnumpy(padded_object[pad_x_left:-pad_x_right, pad_y_left:-pad_y_right]) def _visualize_figax( self, From e132c87ae112735a9edb44981b81a442bd49bacf Mon Sep 17 00:00:00 2001 From: Georgios Varnavides Date: Mon, 18 Mar 2024 09:36:59 -0700 Subject: [PATCH 
40/50] parallax upsample, fit, and correct should return self --- py4DSTEM/process/phase/parallax.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/py4DSTEM/process/phase/parallax.py b/py4DSTEM/process/phase/parallax.py index c8c51054a..c66742d7e 100644 --- a/py4DSTEM/process/phase/parallax.py +++ b/py4DSTEM/process/phase/parallax.py @@ -1755,6 +1755,8 @@ def subpixel_alignment( self.clear_device_mem(self._device, self._clear_fft_cache) + return self + def _interpolate_array( self, image, @@ -2390,6 +2392,8 @@ def score_CTF(coefs): self.clear_device_mem(self._device, self._clear_fft_cache) + return self + def _calculate_CTF(self, alpha_shape, sampling, *coefs): xp = self._xp @@ -2583,6 +2587,7 @@ def aberration_correct( ax.set_title("Parallax-Corrected Phase Image") self.clear_device_mem(self._device, self._clear_fft_cache) + return self def depth_section( self, From 22fc8b9c023c1eee2e0108e859f4cb54f0ec1f41 Mon Sep 17 00:00:00 2001 From: Georgios Varnavides Date: Mon, 18 Mar 2024 11:10:33 -0700 Subject: [PATCH 41/50] removing Wiener filter --- py4DSTEM/process/phase/parallax.py | 68 +++++------------------------- 1 file changed, 11 insertions(+), 57 deletions(-) diff --git a/py4DSTEM/process/phase/parallax.py b/py4DSTEM/process/phase/parallax.py index c66742d7e..88c1dff42 100644 --- a/py4DSTEM/process/phase/parallax.py +++ b/py4DSTEM/process/phase/parallax.py @@ -2441,9 +2441,6 @@ def aberration_correct( plot_corrected_phase: bool = True, k_info_limit: float = None, k_info_power: float = 1.0, - Wiener_filter=False, - Wiener_signal_noise_ratio: float = 1.0, - Wiener_filter_low_only: bool = False, upsampled: bool = True, **kwargs, ): @@ -2461,12 +2458,6 @@ def aberration_correct( maximum allowed frequency in butterworth filter k_info_power: float, optional power of butterworth filter - Wiener_filter: bool, optional - Use Wiener filtering instead of CTF sign correction. 
- Wiener_signal_noise_ratio: float, optional - Signal to noise radio at k = 0 for Wiener filter - Wiener_filter_low_only: bool, optional - Apply Wiener filtering only to the CTF portions before the 1st CTF maxima. """ xp = self._xp @@ -2500,58 +2491,23 @@ def aberration_correct( use_CTF_fit = True if use_CTF_fit: - sin_chi = np.sin( + sin_chi = xp.sin( self._calculate_CTF(im.shape, (sx, sy), *self._aberrations_coefs) ) - - CTF_corr = xp.sign(sin_chi) - CTF_corr[0, 0] = 0 - - # apply correction to mean reconstructed BF image - im_fft_corr = xp.fft.fft2(im) * CTF_corr - - # if needed, add low pass filter output image - if k_info_limit is not None: - im_fft_corr /= 1 + (kra2**k_info_power) / ( - (k_info_limit) ** (2 * k_info_power) - ) else: - # CTF sin_chi = xp.sin((xp.pi * self._wavelength * self.aberration_C1) * kra2) - if Wiener_filter: - SNR_inv = ( - xp.sqrt( - 1 - + (kra2**k_info_power) / ((k_info_limit) ** (2 * k_info_power)) - ) - / Wiener_signal_noise_ratio - ) - CTF_corr = xp.sign(sin_chi) / (sin_chi**2 + SNR_inv) - if Wiener_filter_low_only: - # limit Wiener filter to only the part of the CTF before 1st maxima - k_thresh = 1 / xp.sqrt( - 2.0 * self._wavelength * xp.abs(self.aberration_C1) - ) - k_mask = kra2 >= k_thresh**2 - CTF_corr[k_mask] = xp.sign(sin_chi[k_mask]) - - # apply correction to mean reconstructed BF image - im_fft_corr = xp.fft.fft2(im) * CTF_corr - - else: - # CTF without tilt correction (beyond the parallax operator) - CTF_corr = xp.sign(sin_chi) - CTF_corr[0, 0] = 0 + CTF_corr = xp.sign(sin_chi) + CTF_corr[0, 0] = 0 - # apply correction to mean reconstructed BF image - im_fft_corr = xp.fft.fft2(im) * CTF_corr + # apply correction to mean reconstructed BF image + im_fft_corr = xp.fft.fft2(im) * CTF_corr - # if needed, add low pass filter output image - if k_info_limit is not None: - im_fft_corr /= 1 + (kra2**k_info_power) / ( - (k_info_limit) ** (2 * k_info_power) - ) + # if needed, add low pass filter output image + if k_info_limit is 
not None: + im_fft_corr /= 1 + (kra2**k_info_power) / ( + (k_info_limit) ** (2 * k_info_power) + ) # Output phase image self._recon_phase_corrected = xp.real(xp.fft.ifft2(im_fft_corr)) @@ -2683,9 +2639,7 @@ def depth_section( ) # CTF correction - sin_chi = xp.sin( - (xp.pi * self._wavelength * (self.aberration_C1 + dz)) * kra2 - ) + sin_chi = xp.sin((xp.pi * self._wavelength * (self.aberration_C1)) * kra2) CTF_corr = xp.sign(sin_chi) CTF_corr[0, 0] = 0 if k_info_limit is not None: From b0bbc52da4d5ebce09290a191c617178c3c76741 Mon Sep 17 00:00:00 2001 From: Georgios Varnavides Date: Mon, 18 Mar 2024 13:13:28 -0700 Subject: [PATCH 42/50] fixing depth sectioning --- py4DSTEM/process/phase/parallax.py | 63 +++++++++++++++--------------- 1 file changed, 32 insertions(+), 31 deletions(-) diff --git a/py4DSTEM/process/phase/parallax.py b/py4DSTEM/process/phase/parallax.py index 88c1dff42..aa9323900 100644 --- a/py4DSTEM/process/phase/parallax.py +++ b/py4DSTEM/process/phase/parallax.py @@ -2547,7 +2547,8 @@ def aberration_correct( def depth_section( self, - depth_angstroms=np.arange(-250, 260, 100), + depth_angstroms=None, + use_CTF_fit=True, plot_depth_sections=True, k_info_limit: float = None, k_info_power: float = 1.0, @@ -2576,7 +2577,6 @@ def depth_section( xp = self._xp asnumpy = self._asnumpy - depth_angstroms = xp.atleast_1d(depth_angstroms) if not hasattr(self, "aberration_C1"): raise ValueError( @@ -2586,16 +2586,26 @@ def depth_section( ) ) + if depth_angstroms is None: + depth_angstroms = np.linspace(-256, 256, 33) + depth_angstroms = xp.atleast_1d(depth_angstroms) + # Fourier coordinates - kx = xp.fft.fftfreq(self._recon_BF.shape[0], self._scan_sampling[0]) - ky = xp.fft.fftfreq(self._recon_BF.shape[1], self._scan_sampling[1]) + sx, sy = self._scan_sampling + nx, ny = self._recon_BF.shape + kx = xp.fft.fftfreq(nx, sx) + ky = xp.fft.fftfreq(ny, sy) kra2 = (kx[:, None]) ** 2 + (ky[None, :]) ** 2 - # information limit - if k_info_limit is not None: - k_filt = 1 
/ ( - 1 + (kra2**k_info_power) / ((k_info_limit) ** (2 * k_info_power)) + if use_CTF_fit: + sin_chi = xp.sin( + self._calculate_CTF((nx, ny), (sx, sy), *self._aberrations_coefs) ) + else: + sin_chi = xp.sin((xp.pi * self._wavelength * self.aberration_C1) * kra2) + + CTF_corr = xp.sign(sin_chi) + CTF_corr[0, 0] = 0 # init stack_depth = xp.zeros( @@ -2630,26 +2640,21 @@ def depth_section( dz = depth_angstroms[a0] # Parallax - im_depth = xp.zeros_like(self._recon_BF, dtype="complex") - for a1 in range(self._stack_BF_shifted.shape[0]): - dx = self._probe_angles[a1, 0] * dz - dy = self._probe_angles[a1, 1] * dz - im_depth += xp.fft.fft2(self._stack_BF_shifted[a1]) * xp.exp( - self._qx_shift * dx + self._qy_shift * dy - ) + im_depth = xp.zeros_like(self._recon_BF, dtype=xp.complex64) + dx = -self._probe_angles[:, 0] * dz / self._scan_sampling[0] + dy = -self._probe_angles[:, 1] * dz / self._scan_sampling[1] + shift_op = xp.exp( + self._qx_shift[None] * dx[:, None, None] + + self._qy_shift[None] * dy[:, None, None] + ) + im_depth = xp.fft.fft2(self._stack_BF_shifted) * shift_op * CTF_corr - # CTF correction - sin_chi = xp.sin((xp.pi * self._wavelength * (self.aberration_C1)) * kra2) - CTF_corr = xp.sign(sin_chi) - CTF_corr[0, 0] = 0 if k_info_limit is not None: - CTF_corr *= k_filt + im_depth /= 1 + (kra2**k_info_power) / ( + (k_info_limit) ** (2 * k_info_power) + ) - # apply correction to mean reconstructed BF image - stack_depth[a0] = ( - xp.real(xp.fft.ifft2(im_depth * CTF_corr)) - / self._stack_BF_shifted.shape[0] - ) + stack_depth[a0] = xp.real(xp.fft.ifft2(im_depth)).mean(0) if plot_depth_sections: row_index, col_index = np.unravel_index(a0, (nrows, ncols)) @@ -2673,13 +2678,9 @@ def depth_section( ax.set_xticks([]) ax.set_yticks([]) - ax.set_title(f"Depth section: {dz}A") - - if self._device == "gpu": - xp = self._xp - xp._default_memory_pool.free_all_blocks() - xp.clear_memo() + ax.set_title(f"Depth section: {dz} A") + self.clear_device_mem(self._device, 
self._clear_fft_cache) return stack_depth def _crop_padded_object( From 5bd35b7066183876bf131594ca70658fbb2907ba Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 19 Mar 2024 19:04:47 -0400 Subject: [PATCH 43/50] Bump tj-actions/changed-files from 42 to 43 (#629) Bumps [tj-actions/changed-files](https://github.com/tj-actions/changed-files) from 42 to 43. - [Release notes](https://github.com/tj-actions/changed-files/releases) - [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md) - [Commits](https://github.com/tj-actions/changed-files/compare/v42...v43) --- updated-dependencies: - dependency-name: tj-actions/changed-files dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/pypi_upload.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pypi_upload.yml b/.github/workflows/pypi_upload.yml index b6b3e2d59..1c13429b1 100644 --- a/.github/workflows/pypi_upload.yml +++ b/.github/workflows/pypi_upload.yml @@ -21,7 +21,7 @@ jobs: token: ${{ secrets.GH_ACTION_VERSION_UPDATE }} - name: Get changed files id: changed-files-specific - uses: tj-actions/changed-files@v42 + uses: tj-actions/changed-files@v43 with: files: | py4DSTEM/version.py From 5380901b24df3226425988a9835c10a33f766045 Mon Sep 17 00:00:00 2001 From: Georgios Varnavides Date: Wed, 20 Mar 2024 19:14:11 -0700 Subject: [PATCH 44/50] fixing aberration correction for odd radial symmetries. 
I think the sign ought to be negative, still checking with sims --- py4DSTEM/process/phase/parallax.py | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/py4DSTEM/process/phase/parallax.py b/py4DSTEM/process/phase/parallax.py index aa9323900..060a151aa 100644 --- a/py4DSTEM/process/phase/parallax.py +++ b/py4DSTEM/process/phase/parallax.py @@ -2491,13 +2491,25 @@ def aberration_correct( use_CTF_fit = True if use_CTF_fit: - sin_chi = xp.sin( - self._calculate_CTF(im.shape, (sx, sy), *self._aberrations_coefs) - ) + even_radial_orders = (self._aberrations_mn[:, 0] % 2) == 1 + odd_radial_orders = (self._aberrations_mn[:, 0] % 2) == 0 + + odd_coefs = self._aberrations_coefs.copy() + odd_coefs[even_radial_orders] = 0 + chi_odd = self._calculate_CTF(im.shape, (sx, sy), *odd_coefs) + + even_coefs = self._aberrations_coefs.copy() + even_coefs[odd_radial_orders] = 0 + chi_even = self._calculate_CTF(im.shape, (sx, sy), *even_coefs) + + if not chi_even.any(): # check if all zeros + chi_even = xp.ones_like(chi_even) + else: - sin_chi = xp.sin((xp.pi * self._wavelength * self.aberration_C1) * kra2) + chi_even = (xp.pi * self._wavelength * self.aberration_C1) * kra2 + chi_odd = xp.zeros_like(chi_even) - CTF_corr = xp.sign(sin_chi) + CTF_corr = xp.sign(xp.sin(chi_even)) * xp.exp(-1j * chi_odd) CTF_corr[0, 0] = 0 # apply correction to mean reconstructed BF image From bc8f3338c0696c4afd9b3bab9077b18f4ad0b8c2 Mon Sep 17 00:00:00 2001 From: Colin Ophus Date: Thu, 21 Mar 2024 10:24:30 -0700 Subject: [PATCH 45/50] ACOM module - fix for .ang file writer (#382) * adding annotation import * fft normalization * upsampling limit bugs * bilinear sampling during position correction * sequential numerator-denominator pixel rolling to save memory * some useful warnings for now * add occupancy to CIF reader * add occupancy to structure factors * format with black * removing optional conda comment * adding dependencies comment * Adding correlation metrics * 
Correlation coefs finished * Black formatting * normalize_order = 0 bug * plotting fixes * switching to gray * allowing python 12.1 * changing python requirement to <3.13 * changing install requirements * adding initial fixes * changing how peaks are accessed * changing to PLA from BraggVectors * changing to PLA from BraggVectors * moving import to top of file * black * removing hardcoded link to sample collections * adding overwrite * typo * typo * removing accidental noteobok * back to hardcoded id * changing filename for FCU-Net file * Adding ability to force filename for collection Items * back to non hardcoded now gdrive function fixed * removing print statements * bumping allowed version of tensorflow * black * adding strain mapping back into WPF * Finishing up * Fixing minor bugs * Black formatting * removing test plotting * doc strains for visualize strain * bugs * doc string update * fixing reset=False bug * explicit bin order * some plotting preferences * fixing upsampling limits * explicit aberration orders fit * fixing abtem bug, adding conversion to polar property * adding ability to pass descan_correction_fit_function * DCT-based poisson solver phase unwrapping * constraints refactoring * fixing reshaping bugs * cleaning up calibrations * cleaning up calculate rotation * adding grid search functionality to optimize * removing tuning functions from subclasses - moved to optimizer * Update corr_kernel_size documentation * small typo * cupy numpy bug * refactoring object and probe methods * multislice plotting tweaks * moved read-only self-attribute calls up-front. 
Might remove all-together, especially for positions * cleaned up single-slice preprocess * cleaned up multislice preprocess * cleaned up mixedstate preprocess * underscore typos * underscore typos * huh, not how super works * cleaned up mixed-multi slice preprocess * cleaned up overlap tomography preprocess * removing redundant tune func from parallax * moved single-slice forward and adjoint to methods.py * added necessary multi-slice forward and adjoint methods * added necessary mixed-state forward and adjoint methods * added necessary mixed-state-multi-slce forward and adjoint methods * removed redundant forward and adjoint methods from overlap tomo * cleaned up single-slice reconstruct * cleaned up multi-slice reconstruct * cleaned up mixed-state reconstruct * cleaned up mixed-state multi-slice reconstruct * cleaned up overlap tomo reconstruct, different probes per tilt * moved show_transmitted probe * cleaned up self-consistency viz * oops, forgot to delete duplicate code * cleaned up position correction - is mixed state correct? * more natural location for 3D function * Bump tj-actions/changed-files from 39 to 41 in /.github/workflows Bumps [tj-actions/changed-files](https://github.com/tj-actions/changed-files) from 39 to 41. - [Release notes](https://github.com/tj-actions/changed-files/releases) - [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md) - [Commits](https://github.com/tj-actions/changed-files/compare/v39...v41) --- updated-dependencies: - dependency-name: tj-actions/changed-files dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] * added and cleaned up total position update functionality back in * modernized probe position correction viz * starting viz overhaul * remove redundant figax viz functions * added visualize_last functionality * visualize_all for single-slice * visualize_all for all * small np cp bugs * some figsize defaults * histogram scaling for all * added virtual detector mask * adding dependabot.yml * update to check_config workflow * black * adding dp_mask argument * renamed to ptycho-tomo, started magnetic refactoring * arina reader bug fix * reconstruct functionality for iterative magnetic * splitting up constraints * bumping python version to 3.10 * correctly handling collective updates constraints * Bugfix for forced scan sampling Correctly set scan units when `force_scan_sampling` is used * finished with magnetic ptycho * phase unwrapping bugfixes * Update py4DSTEM/io/filereaders/read_arina.py Co-authored-by: Steve Zeltmann <37132012+sezelt@users.noreply.github.com> * Update py4DSTEM/process/phase/utils.py Co-authored-by: Steve Zeltmann <37132012+sezelt@users.noreply.github.com> * Update py4DSTEM/process/phase/utils.py Co-authored-by: Steve Zeltmann <37132012+sezelt@users.noreply.github.com> * Update py4DSTEM/process/phase/utils.py Co-authored-by: Steve Zeltmann <37132012+sezelt@users.noreply.github.com> * Update py4DSTEM/process/phase/utils.py Co-authored-by: Steve Zeltmann <37132012+sezelt@users.noreply.github.com> * better support for multiple measurements probe properties * broadcasting polar coordinates * Update py4DSTEM/process/phase/utils.py Co-authored-by: Steve Zeltmann <37132012+sezelt@users.noreply.github.com> * Update py4DSTEM/process/phase/utils.py Co-authored-by: Steve Zeltmann <37132012+sezelt@users.noreply.github.com> * docstrings * vectorized_fourier_resample docstrings * generalized show_fourier_probe, added general show_probe, added intensity reporting * visual tweaks * removed leading iterative_ from file names and 
trailing Reconstruction from class names * typo * FFT plotting improvements * Revert "FFT plotting improvements" This reverts commit 6669195c0d439d9d41fee6d8c6a6fc0860f0091e. * Revert "typo" This reverts commit 5990942213ceb4ee0b8491c19a3c7dc06985c88f. * Revert "removed leading iterative_ from file names and trailing Reconstruction from class names" This reverts commit 9114df505727362c515c5115860c9deb42dbaa1c. * removed leading iterative_ from file names and trailing Reconstruction from class names * typo * FFT plotting improvements * switched divergence field functions to be periodic * magnetic ptycho-tomo preprocessing * magnetic ptycho tomo constraints * magnetic ptycho tomo works * improved 3D visualizations * ragged list partitions support for complex plotting * added detector plane resampling * cleaned up cupyx.scipy imports * added storage support for singleslice. other classes likely broken, will fix tomorrow * actually enable overwriting device * fixing single-slice projection sets bugs with storage refactor * cleaning up multislice for device. 
changed propagator tilt convention to negative mrad * added storage support to multislice * adding mixed-state storage support * added storage to mixed-multislice * attrs copying cleanup * added storage to dpc * more dpc viz tweaks * adding vectorized flag to CoM * dp_mask can be None, dub * adding batch size in single-slice preprocess * adding to dpc, multislice, mixedstate, multislice-mixedstate * adding some basic device cleanup to parallax, no storage yer * magnetic ptycho preprocess storage support * added full storage support for magnetic ptycho * added storage to ptycho tomo * added storage support to magnetic ptycho-tomo * transferring parallax bug Steph found in phase_contrast * Update CONTRIBUTORS.md * Update CONTRIBUTORS.md * Update CONTRIBUTORS.md * Update CONTRIBUTORS.md * Update CONTRIBUTORS.md * tweaks to clear_fft_cache * Create CODE_OF_CONDUCT.md This is a suggested template that seems widely used and seems reasonable. * adding FFT-based DCT-II implementations, fixing cupy10.6 bug * small numpy bug * cleaned up cupy 12 feature guarding using try-except * restored scikit functionality * scikit-image by default, poisson as flag * various fixes discovered while making testing notebook * position update bug fix * syntax update for bug fix * complex plotting grid search * multislice grid search plotting * read/write bug fix * show hanning window fix * typo fix * mixedstate probe fourier constraint bug fix * constrain first aperture only * parallax verbosity to True * ms butterworth bug * silly George * cleaning up warnings * simplifying warnings, restructuring single slice regularization flags * one more read write bug fix * more flags more problems: multislice, mixed state, and mixed-multislice * dpc * mistake in multislice * flags to magnetic ptycho * ptycho tomo flags * magnetic ptycho tomo flags * stricter flags * perhaps i forgot an underscore * small parallax change * small read-write changes * removing switch_obj_iter * remore switch_iter from 
all classes * show_fft bug * Bump tj-actions/changed-files from 41 to 42 Bumps [tj-actions/changed-files](https://github.com/tj-actions/changed-files) from 41 to 42. - [Release notes](https://github.com/tj-actions/changed-files/releases) - [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md) - [Commits](https://github.com/tj-actions/changed-files/compare/v41...v42) --- updated-dependencies: - dependency-name: tj-actions/changed-files dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * Bump github/super-linter from 4 to 5 Bumps [github/super-linter](https://github.com/github/super-linter) from 4 to 5. - [Changelog](https://github.com/github/super-linter/blob/main/docs/release-process.md) - [Commits](https://github.com/github/super-linter/compare/v4...v5) --- updated-dependencies: - dependency-name: github/super-linter dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * Bump actions/checkout from 3 to 4 Bumps [actions/checkout](https://github.com/actions/checkout) from 3 to 4. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v3...v4) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * Bump actions/setup-python from 2 to 5 Bumps [actions/setup-python](https://github.com/actions/setup-python) from 2 to 5. - [Release notes](https://github.com/actions/setup-python/releases) - [Commits](https://github.com/actions/setup-python/compare/v2...v5) --- updated-dependencies: - dependency-name: actions/setup-python dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] * minor fixes * Friedel origin finder, various FEM tools (#607) * bug fix * Revert "bug fix" This reverts commit cbce25cd754f1af68c6aded6bfe6c329425fcbae. * num_probes_fit_aberrations * flip 180 option for read arina * ptycho positions offset * remove 180 flip * median_filter_masked_pixels * save polar aberrations instead * read-write compatibility * uncertainty viz bug * adding force_com_measured functionality to ptycho * clean up force_com_measured * adding DP normalization progress bar * moving fov_mask calc to as needed * adding detector_fourier_mask * Auto center finding works! * Adding center finding without a mask * Adding plot range to show_origin_fit, fixing bug * Adding local mean / variance functions for FEM * Adding symmetry analysis and plotting * Fix divide by zero in correlation origin finding function * bug fix for filtering * adding initial commit for friedel correlation origin finder * Correlation working, but mask still buggy * Fixing the masked CC * Simplifying the expression * cleaning up - still need subpixel shifts * Update origin fitting visualization * adding device arg to upsample function * First attempt to add Friedel origin finding to ptycho GPU not yet working * Adding GPU implementation warning * parabolic subpixel fitting * minor updates * Going back to dev version of phase contrast * Changing np to xp for GPU compatibility * Fixing xp = np device options * Revering phase contrast options back to dev * Cleaning up code, fixing GPU support * black formatting * black updates * Adding annular symmetry plotting function * black formatting * cleaning typos and dead code and such * Update py4DSTEM/process/polar/polar_analysis.py Co-authored-by: Steve Zeltmann <37132012+sezelt@users.noreply.github.com> --------- Co-authored-by: Stephanie Ribet Co-authored-by: Georgios Varnavides Co-authored-by: gvarnavi Co-authored-by: SE Zeltmann Co-authored-by: Steve Zeltmann <37132012+sezelt@users.noreply.github.com> * 
Release memory after Bayesian optimization (#599) * do not retain optimization function after finished running, to release memory * update comment * set clear_fft_cache off in optimizer defaults * Update parameter_optimize.py * format with black 24.1.1 * Update parameter_optimize.py * format with black * Added phase_contrast warning in README (#608) * Adding error for orix install requirements * ACOM .ang file writer fixes * format with black --------- Signed-off-by: dependabot[bot] Co-authored-by: alex-rakowski Co-authored-by: Georgios Varnavides Co-authored-by: bsavitzky Co-authored-by: Steven Zeltmann Co-authored-by: Steve Zeltmann <37132012+sezelt@users.noreply.github.com> Co-authored-by: Stephanie Ribet Co-authored-by: SE Zeltmann Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: gvarnavi --- py4DSTEM/process/diffraction/crystal_ACOM.py | 23 +++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/py4DSTEM/process/diffraction/crystal_ACOM.py b/py4DSTEM/process/diffraction/crystal_ACOM.py index 83dff29e1..0885014e5 100644 --- a/py4DSTEM/process/diffraction/crystal_ACOM.py +++ b/py4DSTEM/process/diffraction/crystal_ACOM.py @@ -2192,6 +2192,7 @@ def save_ang_file( pixel_units="px", transpose_xy=True, flip_x=False, + flip_y=False, ): """ This function outputs an ascii text file in the .ang format, containing @@ -2211,8 +2212,10 @@ def save_ang_file( nothing """ - - from orix.io.plugins.ang import file_writer + try: + from orix.io.plugins.ang import file_writer + except ImportError: + raise Exception("orix failed to import; try pip installing separately") xmap = self.orientation_map_to_orix_CrystalMap( orientation_map, @@ -2222,6 +2225,7 @@ def save_ang_file( return_color_key=False, transpose_xy=transpose_xy, flip_x=flip_x, + flip_y=flip_y, ) file_writer(file_name, xmap) @@ -2235,6 +2239,7 @@ def orientation_map_to_orix_CrystalMap( pixel_units="px", transpose_xy=True, flip_x=False, + 
flip_y=False, return_color_key=False, ): try: @@ -2260,12 +2265,20 @@ def orientation_map_to_orix_CrystalMap( import warnings - # Get orientation matrices + # Get orientation matrices and correlation signal (will be used as iq and ci) orientation_matrices = orientation_map.matrix[:, :, ind_orientation].copy() + corr_values = orientation_map.corr[:, :, ind_orientation].copy() + + # Check for transpose if transpose_xy: orientation_matrices = np.transpose(orientation_matrices, (1, 0, 2, 3)) + corr_values = np.transpose(corr_values, (1, 0)) if flip_x: orientation_matrices = np.flip(orientation_matrices, axis=0) + corr_values = np.flip(corr_values, axis=0) + if flip_y: + orientation_matrices = np.flip(orientation_matrices, axis=1) + corr_values = np.flip(corr_values, axis=1) # Convert the orientation matrices into Euler angles # suppress Gimbal lock warnings @@ -2327,8 +2340,8 @@ def fxn(): y=coords["y"], phase_list=PhaseList(phase), prop={ - "iq": orientation_map.corr[:, :, ind_orientation].ravel(), - "ci": orientation_map.corr[:, :, ind_orientation].ravel(), + "iq": corr_values.ravel(), + "ci": corr_values.ravel(), }, scan_unit=pixel_units, ) From 74ede03e834fb095b811ddc824f345981a193aed Mon Sep 17 00:00:00 2001 From: Colin Ophus Date: Thu, 21 Mar 2024 14:37:38 -0700 Subject: [PATCH 46/50] Fixing projection 2D thickness (#631) * initial commit for projected potential * Atom coordinates mostly working Some tiling issue for image corners * Projected potential now working with bugs projection algebra definitely has a bug * minor tweak * Projected potentials fixed? * adding figsize * update docstring * Adding thickness projection * Fourier method makes boundary conditions difficult * Updated plotting * Adding robust fitting to ACOM strain mapping * Updating matching * Fixing thickness projection in 2D potentials * black * Remove testing lines. 
* Trying (and failing) to figure out the potential units * Black formatting * Black again --- py4DSTEM/process/diffraction/crystal.py | 240 ++++++++++++++++++ py4DSTEM/process/diffraction/crystal_ACOM.py | 103 ++++++-- .../process/diffraction/crystal_calibrate.py | 8 +- py4DSTEM/process/utils/single_atom_scatter.py | 36 +++ 4 files changed, 358 insertions(+), 29 deletions(-) diff --git a/py4DSTEM/process/diffraction/crystal.py b/py4DSTEM/process/diffraction/crystal.py index aa1eb8555..97d596ec4 100644 --- a/py4DSTEM/process/diffraction/crystal.py +++ b/py4DSTEM/process/diffraction/crystal.py @@ -1,6 +1,7 @@ # Functions for calculating diffraction patterns, matching them to experiments, and creating orientation and phase maps. import numpy as np +from scipy.ndimage import gaussian_filter import matplotlib.pyplot as plt from matplotlib.patches import Circle from fractions import Fraction @@ -967,6 +968,245 @@ def generate_ring_pattern( if return_calc is True: return radii_unique, intensity_unique + def generate_projected_potential( + self, + im_size=(256, 256), + pixel_size_angstroms=0.1, + potential_radius_angstroms=3.0, + sigma_image_blur_angstroms=0.1, + thickness_angstroms=100, + power_scale=1.0, + plot_result=False, + figsize=(6, 6), + orientation: Optional[Orientation] = None, + ind_orientation: Optional[int] = 0, + orientation_matrix: Optional[np.ndarray] = None, + zone_axis_lattice: Optional[np.ndarray] = None, + proj_x_lattice: Optional[np.ndarray] = None, + zone_axis_cartesian: Optional[np.ndarray] = None, + proj_x_cartesian: Optional[np.ndarray] = None, + ): + """ + Generate an image of the projected potential of crystal in real space, + using cell tiling, and a lookup table of the atomic potentials. + Note that we round atomic positions to the nearest pixel for speed. + + TODO - fix scattering prefactor so that output units are sensible. + + Parameters + ---------- + im_size: tuple, list, np.array + (2,) vector specifying the output size in pixels. 
+ pixel_size_angstroms: float + Pixel size in Angstroms. + potential_radius_angstroms: float + Radius in Angstroms for how far to integrate the atomic potentials + sigma_image_blur_angstroms: float + Image blurring in Angstroms. + thickness_angstroms: float + Thickness of the sample in Angstroms. + Set thickness_thickness_angstroms = 0 to skip thickness projection. + power_scale: float + Power law scaling of potentials. Set to 2.0 to approximate Z^2 images. + plot_result: bool + Plot the projected potential image. + figsize: + (2,) vector giving the size of the output. + + orientation: Orientation + An Orientation class object + ind_orientation: int + If input is an Orientation class object with multiple orientations, + this input can be used to select a specific orientation. + orientation_matrix: array + (3,3) orientation matrix, where columns represent projection directions. + zone_axis_lattice: array + (3,) projection direction in lattice indices + proj_x_lattice: array) + (3,) x-axis direction in lattice indices + zone_axis_cartesian: array + (3,) cartesian projection direction + proj_x_cartesian: array + (3,) cartesian projection direction + + Returns + -------- + im_potential: (np.array) + Output image of the projected potential. 
+ + """ + + # Determine image size in Angstroms + im_size = np.array(im_size) + im_size_Ang = im_size * pixel_size_angstroms + + # Parse orientation inputs + if orientation is not None: + if ind_orientation is None: + orientation_matrix = orientation.matrix[0] + else: + orientation_matrix = orientation.matrix[ind_orientation] + elif orientation_matrix is None: + orientation_matrix = self.parse_orientation( + zone_axis_lattice, proj_x_lattice, zone_axis_cartesian, proj_x_cartesian + ) + + # Rotate unit cell into projection direction + lat_real = self.lat_real.copy() @ orientation_matrix + + # Determine unit cell axes to tile over, by selecting 2/3 with largest in-plane component + inds_tile = np.argsort(np.linalg.norm(lat_real[:, 0:2], axis=1))[1:3] + m_tile = lat_real[inds_tile, :] + # Vector projected along optic axis + m_proj = np.squeeze(np.delete(lat_real, inds_tile, axis=0)) + + # Determine tiling range + p_corners = np.array( + [ + [-im_size_Ang[0] * 0.5, -im_size_Ang[1] * 0.5, 0.0], + [im_size_Ang[0] * 0.5, -im_size_Ang[1] * 0.5, 0.0], + [-im_size_Ang[0] * 0.5, im_size_Ang[1] * 0.5, 0.0], + [im_size_Ang[0] * 0.5, im_size_Ang[1] * 0.5, 0.0], + ] + ) + ab = np.linalg.lstsq(m_tile[:, :2].T, p_corners[:, :2].T, rcond=None)[0] + ab = np.floor(ab) + a_range = np.array((np.min(ab[0]) - 1, np.max(ab[0]) + 2)) + b_range = np.array((np.min(ab[1]) - 1, np.max(ab[1]) + 2)) + + # Tile unit cell + a_ind, b_ind, atoms_ind = np.meshgrid( + np.arange(a_range[0], a_range[1]), + np.arange(b_range[0], b_range[1]), + np.arange(self.positions.shape[0]), + ) + abc_atoms = self.positions[atoms_ind.ravel(), :] + abc_atoms[:, inds_tile[0]] += a_ind.ravel() + abc_atoms[:, inds_tile[1]] += b_ind.ravel() + xyz_atoms_ang = abc_atoms @ lat_real + atoms_ID_all = self.numbers[atoms_ind.ravel()] + + # Center atoms on image plane + x = xyz_atoms_ang[:, 0] / pixel_size_angstroms + im_size[0] / 2.0 + y = xyz_atoms_ang[:, 1] / pixel_size_angstroms + im_size[1] / 2.0 + atoms_del = 
np.logical_or.reduce( + ( + x <= -potential_radius_angstroms / 2, + y <= -potential_radius_angstroms / 2, + x >= im_size[0] + potential_radius_angstroms / 2, + y >= im_size[1] + potential_radius_angstroms / 2, + ) + ) + x = np.delete(x, atoms_del) + y = np.delete(y, atoms_del) + atoms_ID_all = np.delete(atoms_ID_all, atoms_del) + + # Coordinate system for atomic projected potentials + potential_radius = np.ceil(potential_radius_angstroms / pixel_size_angstroms) + R = np.arange(0.5 - potential_radius, potential_radius + 0.5) + R_ind = R.astype("int") + R_2D = np.sqrt(R[:, None] ** 2 + R[None, :] ** 2) + + # Lookup table for atomic projected potentials + atoms_ID = np.unique(self.numbers) + atoms_lookup = np.zeros( + ( + atoms_ID.shape[0], + R_2D.shape[0], + R_2D.shape[1], + ) + ) + for a0 in range(atoms_ID.shape[0]): + atom_sf = single_atom_scatter([atoms_ID[a0]]) + atoms_lookup[a0, :, :] = atom_sf.projected_potential(atoms_ID[a0], R_2D) + atoms_lookup **= power_scale + + # Thickness + if thickness_angstroms > 0: + num_proj = np.round(thickness_angstroms / np.abs(m_proj[2])).astype("int") + if num_proj > 1: + vec_proj = m_proj[:2] / pixel_size_angstroms + shifts = np.arange(num_proj).astype("float") + shifts -= np.mean(shifts) + x_proj = shifts * vec_proj[0] + y_proj = shifts * vec_proj[1] + else: + num_proj = 1 + + # initialize potential + im_potential = np.zeros(im_size) + + # Add atoms to potential image + for a0 in range(atoms_ID_all.shape[0]): + ind = np.argmin(np.abs(atoms_ID - atoms_ID_all[a0])) + + if num_proj > 1: + for a1 in range(num_proj): + x_ind = np.round(x[a0] + x_proj[a1]).astype("int") + R_ind + y_ind = np.round(y[a0] + y_proj[a1]).astype("int") + R_ind + x_sub = np.logical_and( + x_ind >= 0, + x_ind < im_size[0], + ) + y_sub = np.logical_and( + y_ind >= 0, + y_ind < im_size[1], + ) + + im_potential[ + x_ind[x_sub][:, None], y_ind[y_sub][None, :] + ] += atoms_lookup[ind][x_sub, :][:, y_sub] + + else: + x_ind = np.round(x[a0]).astype("int") + R_ind 
+ y_ind = np.round(y[a0]).astype("int") + R_ind + x_sub = np.logical_and( + x_ind >= 0, + x_ind < im_size[0], + ) + y_sub = np.logical_and( + y_ind >= 0, + y_ind < im_size[1], + ) + + im_potential[ + x_ind[x_sub][:, None], y_ind[y_sub][None, :] + ] += atoms_lookup[ind][x_sub, :][:, y_sub] + + if thickness_angstroms > 0: + im_potential /= num_proj + + # if needed, apply gaussian blurring + if sigma_image_blur_angstroms > 0: + sigma_image_blur = sigma_image_blur_angstroms / pixel_size_angstroms + im_potential = gaussian_filter( + im_potential, + sigma_image_blur, + mode="nearest", + ) + + if plot_result: + # quick plotting of the result + int_vals = np.sort(im_potential.ravel()) + int_range = np.array( + ( + int_vals[np.round(0.02 * int_vals.size).astype("int")], + int_vals[np.round(0.98 * int_vals.size).astype("int")], + ) + ) + + fig, ax = plt.subplots(figsize=figsize) + ax.imshow( + im_potential, + cmap="turbo", + vmin=int_range[0], + vmax=int_range[1], + ) + ax.set_axis_off() + ax.set_aspect("equal") + + return im_potential + # Vector conversions and other utilities for Crystal classes def cartesian_to_lattice(self, vec_cartesian): vec_lattice = self.lat_inv @ vec_cartesian diff --git a/py4DSTEM/process/diffraction/crystal_ACOM.py b/py4DSTEM/process/diffraction/crystal_ACOM.py index 0885014e5..7ac33957e 100644 --- a/py4DSTEM/process/diffraction/crystal_ACOM.py +++ b/py4DSTEM/process/diffraction/crystal_ACOM.py @@ -2025,6 +2025,9 @@ def calculate_strain( tol_intensity: float = 1e-4, k_max: Optional[float] = None, min_num_peaks=5, + intensity_weighting=False, + robust=True, + robust_thresh=3.0, rotation_range=None, mask_from_corr=True, corr_range=(0, 2), @@ -2039,24 +2042,46 @@ def calculate_strain( TODO: add robust fitting? - Args: - bragg_peaks_array (PointListArray): All Bragg peaks - orientation_map (OrientationMap): Orientation map generated from ACOM - corr_kernel_size (float): Correlation kernel size - if user does - not specify, uses self.corr_kernel_size. 
- sigma_excitation_error (float): sigma value for envelope applied to s_g (excitation errors) in units of inverse Angstroms - tol_excitation_error_mult (float): tolerance in units of sigma for s_g inclusion - tol_intensity (np float): tolerance in intensity units for inclusion of diffraction spots - k_max (float): Maximum scattering vector - min_num_peaks (int): Minimum number of peaks required. - rotation_range (float): Maximum rotation range in radians (for symmetry reduction). - progress_bar (bool): Show progress bar - mask_from_corr (bool): Use ACOM correlation signal for mask - corr_range (np.ndarray): Range of correlation signals for mask - corr_normalize (bool): Normalize correlation signal before masking + Parameters + ---------- + bragg_peaks_array (PointListArray): + All Bragg peaks + orientation_map (OrientationMap): + Orientation map generated from ACOM + corr_kernel_size (float): + Correlation kernel size - if user does + not specify, uses self.corr_kernel_size. + sigma_excitation_error (float): + sigma value for envelope applied to s_g (excitation errors) in units of inverse Angstroms + tol_excitation_error_mult (float): + tolerance in units of sigma for s_g inclusion + tol_intensity (np float): + tolerance in intensity units for inclusion of diffraction spots + k_max (float): + Maximum scattering vector + min_num_peaks (int): + Minimum number of peaks required. + intensity_weighting: bool + Set to True to weight least squares by experimental peak intensity. + robust_fitting: bool + Set to True to use robust fitting, which performs outlier rejection. + robust_thresh: float + Threshold for robust fitting weights. + rotation_range (float): + Maximum rotation range in radians (for symmetry reduction). 
+ progress_bar (bool): + Show progress bar + mask_from_corr (bool): + Use ACOM correlation signal for mask + corr_range (np.ndarray): + Range of correlation signals for mask + corr_normalize (bool): + Normalize correlation signal before masking - Returns: - strain_map (RealSlice): strain tensor + Returns + -------- + strain_map (RealSlice): + strain tensor """ @@ -2143,16 +2168,44 @@ def calculate_strain( (p_ref.data["qx"][inds_match[keep]], p_ref.data["qy"][inds_match[keep]]) ).T - # Apply intensity weighting from experimental measurements - qxy *= p.data["intensity"][keep, None] - qxy_ref *= p.data["intensity"][keep, None] - # Fit transformation matrix # Note - not sure about transpose here # (though it might not matter if rotation isn't included) - m = lstsq(qxy_ref, qxy, rcond=None)[0].T - - # Get the infinitesimal strain matrix + if intensity_weighting: + weights = np.sqrt(p.data["intensity"][keep, None]) * 0 + 1 + m = lstsq( + qxy_ref * weights, + qxy * weights, + rcond=None, + )[0].T + else: + m = lstsq( + qxy_ref, + qxy, + rcond=None, + )[0].T + + # Robust fitting + if robust: + for a0 in range(5): + # calculate new weights + qxy_fit = qxy_ref @ m + diff2 = np.sum((qxy_fit - qxy) ** 2, axis=1) + + weights = np.exp( + diff2 / ((-2 * robust_thresh**2) * np.median(diff2)) + )[:, None] + if intensity_weighting: + weights *= np.sqrt(p.data["intensity"][keep, None]) + + # calculate new fits + m = lstsq( + qxy_ref * weights, + qxy * weights, + rcond=None, + )[0].T + + # Set values into the infinitesimal strain matrix strain_map.get_slice("e_xx").data[rx, ry] = 1 - m[0, 0] strain_map.get_slice("e_yy").data[rx, ry] = 1 - m[1, 1] strain_map.get_slice("e_xy").data[rx, ry] = -(m[0, 1] + m[1, 0]) / 2.0 @@ -2160,7 +2213,7 @@ def calculate_strain( # Add finite rotation from ACOM orientation map. # I am not sure about the relative signs here. - # Also, I need to add in the mirror operator. + # Also, maybe I need to add in the mirror operator? 
if orientation_map.mirror[rx, ry, 0]: strain_map.get_slice("theta").data[rx, ry] += ( orientation_map.angles[rx, ry, 0, 0] diff --git a/py4DSTEM/process/diffraction/crystal_calibrate.py b/py4DSTEM/process/diffraction/crystal_calibrate.py index c068bf79e..b15015c62 100644 --- a/py4DSTEM/process/diffraction/crystal_calibrate.py +++ b/py4DSTEM/process/diffraction/crystal_calibrate.py @@ -21,7 +21,7 @@ def calibrate_pixel_size( k_max=None, k_step=0.002, k_broadening=0.002, - fit_all_intensities=True, + fit_all_intensities=False, set_calibration_in_place=False, verbose=True, plot_result=False, @@ -50,7 +50,7 @@ def calibrate_pixel_size( k_broadening (float): Initial guess for Gaussian broadening of simulated pattern (Å^-1) fit_all_intensities (bool): Set to true to allow all peak intensities to - change independently False forces a single intensity scaling. + change independently. False forces a single intensity scaling for all peaks. set_calibration (bool): if True, set the fit pixel size to the calibration metadata, and calibrate bragg_peaks verbose (bool): Output the calibrated pixel size. 
@@ -138,7 +138,7 @@ def fit_profile(k, *coefs): if returnfig: fig, ax = self.plot_scattering_intensity( - bragg_peaks=bragg_peaks, + bragg_peaks=bragg_peaks_cali, figsize=figsize, k_broadening=k_broadening, int_power_scale=1.0, @@ -151,7 +151,7 @@ def fit_profile(k, *coefs): ) else: self.plot_scattering_intensity( - bragg_peaks=bragg_peaks, + bragg_peaks=bragg_peaks_cali, figsize=figsize, k_broadening=k_broadening, int_power_scale=1.0, diff --git a/py4DSTEM/process/utils/single_atom_scatter.py b/py4DSTEM/process/utils/single_atom_scatter.py index 8d6e2a891..54443b68f 100644 --- a/py4DSTEM/process/utils/single_atom_scatter.py +++ b/py4DSTEM/process/utils/single_atom_scatter.py @@ -1,5 +1,6 @@ import numpy as np import os +from scipy.special import kn class single_atom_scatter(object): @@ -46,6 +47,41 @@ def electron_scattering_factor(self, Z, gsq, units="A"): elif units == "A": return fe + def projected_potential(self, Z, R): + ai = self.e_scattering_factors[Z - 1, 0:10:2] + bi = self.e_scattering_factors[Z - 1, 1:10:2] + + # Planck's constant in Js + h = 6.62607004e-34 + # Electron rest mass in kg + me = 9.10938356e-31 + # Electron charge in Coulomb + qe = 1.60217662e-19 + # Electron charge in V-Angstroms + # qe = 14.4 + # Permittivity of vacuum + eps_0 = 8.85418782e-12 + # Bohr's constant + a_0 = 5.29177210903e-11 + + fe = np.zeros_like(R) + for i in range(5): + pre = 2 * np.pi / bi[i] ** 0.5 + fe += (ai[i] / bi[i] ** 1.5) * (kn(0, pre * R) + R * kn(1, pre * R)) + + # Scale output units + # kappa = (4*np.pi*eps_0) / (2*np.pi*a_0*qe) + # fe *= 2*np.pi**2 / kappa + # # # kappa = (4*np.pi*eps_0) / (2*np.pi*a_0*me) + + # # kappa = (4*np.pi*eps_0) / (2*np.pi*a_0*me) + # # return fe * 2 * np.pi**2 # / kappa + # # if units == "VA": + # return h**2 / (2 * np.pi * me * qe) * 1e18 * fe + # # elif units == "A": + # # return fe * 2 * np.pi**2 / kappa + return fe + def get_scattering_factor( self, elements=None, composition=None, q_coords=None, units=None ): From 
7a0e34ca9015e1f547603f8698a4fb35e8cf8ee2 Mon Sep 17 00:00:00 2001 From: Colin Date: Thu, 21 Mar 2024 14:38:36 -0700 Subject: [PATCH 47/50] Versioning to 0.14.10 --- py4DSTEM/version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/py4DSTEM/version.py b/py4DSTEM/version.py index 103751b29..1d9caa86b 100644 --- a/py4DSTEM/version.py +++ b/py4DSTEM/version.py @@ -1 +1 @@ -__version__ = "0.14.9" +__version__ = "0.14.10" From 1ef52d6ac9d907a163274bbdea63f0d1344599ae Mon Sep 17 00:00:00 2001 From: Steve Zeltmann <37132012+sezelt@users.noreply.github.com> Date: Thu, 21 Mar 2024 17:48:04 -0400 Subject: [PATCH 48/50] Revert "Versioning to 0.14.10" --- py4DSTEM/version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/py4DSTEM/version.py b/py4DSTEM/version.py index 1d9caa86b..103751b29 100644 --- a/py4DSTEM/version.py +++ b/py4DSTEM/version.py @@ -1 +1 @@ -__version__ = "0.14.10" +__version__ = "0.14.9" From c21aa5190bfa7ec628239cc0eaac15ab86f4d495 Mon Sep 17 00:00:00 2001 From: Steve Zeltmann <37132012+sezelt@users.noreply.github.com> Date: Thu, 21 Mar 2024 18:37:11 -0400 Subject: [PATCH 49/50] Update pypi_upload.yml --- .github/workflows/pypi_upload.yml | 84 +++++++++++++++---------------- 1 file changed, 42 insertions(+), 42 deletions(-) diff --git a/.github/workflows/pypi_upload.yml b/.github/workflows/pypi_upload.yml index 1c13429b1..bf0c341cb 100644 --- a/.github/workflows/pypi_upload.yml +++ b/.github/workflows/pypi_upload.yml @@ -39,47 +39,47 @@ jobs: git config --global user.email "ben.savitzky@gmail.com" git config --global user.name "bsavitzky" git commit -a -m "Auto-update version number (GH Action)" - git push origin main - sync_with_dev: - needs: update_version - runs-on: ubuntu-latest - name: Sync main with dev - steps: - - name: Sync main with dev - uses: actions/checkout@v4 - with: - ref: dev - fetch-depth: 0 - token: ${{ secrets.GH_ACTION_VERSION_UPDATE }} - - run: | - # set strategy to default merge - git config 
pull.rebase false - git config --global user.email "ben.savitzky@gmail.com" - git config --global user.name "bsavitzky" - git pull origin main --commit --no-edit - git push origin dev - deploy: - needs: sync_with_dev - runs-on: ubuntu-latest - name: Deploy to PyPI - steps: - - uses: actions/checkout@v4 - with: - ref: dev - - name: Set up Python - uses: actions/setup-python@v5 - with: - python-version: 3.8 - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install build - - name: Build package - run: python -m build - - name: Publish package - uses: pypa/gh-action-pypi-publish@release/v1 - with: - user: __token__ - password: ${{ secrets.PYPI_API_TOKEN }} + git push origin + # sync_with_dev: + # needs: update_version + # runs-on: ubuntu-latest + # name: Sync main with dev + # steps: + # - name: Sync main with dev + # uses: actions/checkout@v4 + # with: + # ref: dev + # fetch-depth: 0 + # token: ${{ secrets.GH_ACTION_VERSION_UPDATE }} + # - run: | + # # set strategy to default merge + # git config pull.rebase false + # git config --global user.email "ben.savitzky@gmail.com" + # git config --global user.name "bsavitzky" + # git pull origin main --commit --no-edit + # git push origin dev + # deploy: + # needs: sync_with_dev + # runs-on: ubuntu-latest + # name: Deploy to PyPI + # steps: + # - uses: actions/checkout@v4 + # with: + # ref: dev + # - name: Set up Python + # uses: actions/setup-python@v5 + # with: + # python-version: 3.8 + # - name: Install dependencies + # run: | + # python -m pip install --upgrade pip + # pip install build + # - name: Build package + # run: python -m build + # - name: Publish package + # uses: pypa/gh-action-pypi-publish@release/v1 + # with: + # user: __token__ + # password: ${{ secrets.PYPI_API_TOKEN }} From ac1a498cd4696057bfb4984ce05b4e9522c7a3cf Mon Sep 17 00:00:00 2001 From: Steve Zeltmann <37132012+sezelt@users.noreply.github.com> Date: Thu, 21 Mar 2024 18:47:39 -0400 Subject: [PATCH 50/50] Update 
pypi_upload.yml --- .github/workflows/pypi_upload.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/pypi_upload.yml b/.github/workflows/pypi_upload.yml index bf0c341cb..9d05e3686 100644 --- a/.github/workflows/pypi_upload.yml +++ b/.github/workflows/pypi_upload.yml @@ -6,9 +6,9 @@ on: push: branches: - main - pull_request: - branches: - - main + # pull_request: + # branches: + # - main jobs: update_version: