diff --git a/.github/workflows/container_build.sh b/.github/workflows/container_build.sh index 73a15c093..731cfe2a9 100755 --- a/.github/workflows/container_build.sh +++ b/.github/workflows/container_build.sh @@ -1,6 +1,11 @@ #!/usr/bin/env bash cd /github/workspace/ +# Use archive mirror for CentOS 7 until we are ready to migrate to CentOS 8 +sed -i -e 's/mirrorlist/#mirrorlist/g' \ + -e 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' \ + /etc/yum.repos.d/CentOS-* + # Install dependencies yum install -y wget git centos-release-scl ca-certificates diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index ea3cd94ca..62e37eed5 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -61,8 +61,9 @@ jobs: if: ${{ matrix.config.os == 'macos-latest'}} - name: Install HEXRD + # Install in editable mode for codecov run: | - pip install . + pip install -e . working-directory: hexrd - name: Install requirements-dev.txt @@ -75,4 +76,22 @@ jobs: HEXRD_EXAMPLE_REPO_PATH: ${{ github.workspace }}/examples run: | pytest -s tests/ + if: ${{ matrix.config.os != 'ubuntu-latest'}} working-directory: hexrd + + - name: Run tests with codecov + env: + HEXRD_EXAMPLE_REPO_PATH: ${{ github.workspace }}/examples + run: | + pytest -s --cov hexrd --cov-report xml:coverage.xml tests/ + if: ${{ matrix.config.os == 'ubuntu-latest'}} + working-directory: hexrd + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v3 + with: + token: ${{ secrets.CODECOV_TOKEN }} + file: coverage.xml + working-directory: hexrd + if: ${{ matrix.config.os == 'ubuntu-latest'}} + diff --git a/README.md b/README.md index 07028c515..e80a2b46b 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,5 @@ -[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.8033939.svg)](https://doi.org/10.5281/zenodo.8033939) ![conda-package](https://github.com/HEXRD/hexrd/workflows/conda-package/badge.svg) ![test](https://github.com/HEXRD/hexrd/workflows/test/badge.svg) ![latest version](https://anaconda.org/hexrd/hexrd/badges/version.svg) ![last updated](https://anaconda.org/hexrd/hexrd/badges/latest_release_relative_date.svg) ![downloads](https://anaconda.org/hexrd/hexrd/badges/downloads.svg) +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.8033939.svg)](https://doi.org/10.5281/zenodo.8033939) ![conda-package](https://github.com/HEXRD/hexrd/workflows/conda-package/badge.svg) ![test](https://github.com/HEXRD/hexrd/workflows/test/badge.svg) [![codecov](https://codecov.io/gh/Hexrd/hexrd/graph/badge.svg)](https://codecov.io/gh/Hexrd/hexrd) ![latest version](https://anaconda.org/hexrd/hexrd/badges/version.svg) ![last updated](https://anaconda.org/hexrd/hexrd/badges/latest_release_relative_date.svg) ![downloads](https://anaconda.org/hexrd/hexrd/badges/downloads.svg) + # HEXRD The HEXRD project is developing a cross-platform, open-source library for the general analysis of X-ray diffraction data. This includes powder diffraction, Laue diffraction, and High Energy Diffraction Microscopy (_a.k.a._ 3DXRD, multi-grain rotation method) modalities. At its core, HEXRD provides an abstraction of a generic diffraction instrument with support for multiple detectors. This includes optimized transforms from the direct and reciprocal crystal lattices to the local detector coordinates, harnesses for interpolating image data into scattering angle coordinates, and sophisticated calibration routines. 
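Reviewer note: the new coverage job above can be reproduced locally. A minimal sketch, assuming pytest-cov is installed alongside the editable install (`pip install -e .`); the examples path below is a hypothetical placeholder for your own checkout of the HEXRD examples repo:

import os
import pytest

# mirror the CI environment variable (hypothetical local path)
os.environ["HEXRD_EXAMPLE_REPO_PATH"] = "/path/to/examples"

# same invocation as the new "Run tests with codecov" step
pytest.main(["-s", "--cov", "hexrd", "--cov-report", "xml:coverage.xml", "tests/"])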
diff --git a/hexrd/cli/fit_grains.py b/hexrd/cli/fit_grains.py
index e4527a9a6..77b0781b0 100644
--- a/hexrd/cli/fit_grains.py
+++ b/hexrd/cli/fit_grains.py
@@ -335,7 +335,6 @@ def execute(args, parser):
     gid_list = None
     if args.grains is not None:
         gid_list = [int(i) for i in args.grains.split(',')]
-        pass
 
     fit_results = fit_grains(
         cfg,
diff --git a/hexrd/constants.py b/hexrd/constants.py
index fa92fbcc7..aa813e52f 100644
--- a/hexrd/constants.py
+++ b/hexrd/constants.py
@@ -251,24 +251,13 @@ def _readenv(name, ctor, default):
         del warnings
         return default
 
-
-# 0 = do NOT use numba
-# 1 = use numba (default)
-USE_NUMBA = _readenv("HEXRD_USE_NUMBA", int, 1)
-if USE_NUMBA:
-    try:
-        import numba
-    except ImportError:
-        print("*** Numba not available, processing may run slower ***")
-        USE_NUMBA = False
-
 del _readenv
 
 
 def set_numba_cache():
     """Set the numba cache only if the following are true:
 
-    1. We are using numba
+    1. We are using numba (now always true)
     2. We are on Windows
     3. We don't have write access to this file
     4. The NUMBA_CACHE_DIR environment variable is not defined
@@ -277,8 +266,6 @@ def set_numba_cache():
     directory where it doesn't have permission, and cause the application
     to freeze. Avoid that by setting the cache dir ourselves.
     """
-    if not USE_NUMBA:
-        return
     if os.name != 'nt':
         return
diff --git a/hexrd/deprecation.py b/hexrd/deprecation.py
new file mode 100644
index 000000000..0ac51b271
--- /dev/null
+++ b/hexrd/deprecation.py
@@ -0,0 +1,35 @@
+import os
+import functools
+
+
+class DeprecatedFunctionError(Exception):
+    """Custom exception for deprecated functions."""
+    pass
+
+
+def deprecated(new_func: str = None, removal_date: str = None):
+    """
+    Decorator to mark functions as deprecated. Raises an error if
+    the 'ACK_DEPRECATED' environment variable is not set to 'true'.
+    Alerts the user to the replacement function if provided.
+    """
+
+    def decorator(func):
+        @functools.wraps(func)
+        def wrapper(*args, **kwargs):
+            if new_func is not None:
+                print(
+                    f"Warning: {func.__name__} is deprecated and is marked for"
+                    f" removal. Please use {new_func} instead."
+                    f" Removal date: {removal_date}"
+                )
+            if os.getenv('ACK_DEPRECATED') != 'true':
+                raise DeprecatedFunctionError(
+                    f"Function {func.__name__} is deprecated. Set environment "
+                    "variable 'ACK_DEPRECATED' to 'true' to acknowledge."
+                )
+            return func(*args, **kwargs)
+
+        return wrapper
+
+    return decorator
diff --git a/hexrd/distortion/dexela_2923.py b/hexrd/distortion/dexela_2923.py
index d7c7813a1..9495448b5 100644
--- a/hexrd/distortion/dexela_2923.py
+++ b/hexrd/distortion/dexela_2923.py
@@ -5,11 +5,9 @@
 
 @author: Joel V.
Bernier """ import numpy as np +import numba from hexrd import constants -from hexrd.constants import USE_NUMBA -if USE_NUMBA: - import numba from .distortionabc import DistortionABC from .registry import _RegisterDistortionClass @@ -69,71 +67,44 @@ def _find_quadrant(xy_in): return quad_label -if USE_NUMBA: - @numba.njit(nogil=True, cache=True) - def _dexela_2923_distortion(out_, in_, params): - for el in range(len(in_)): - xi, yi = in_[el, :] - if xi < 0.: - if yi < 0.: - # 3rd quadrant - out_[el, :] = in_[el, :] + params[4:6] - else: - # 2nd quadrant - out_[el, :] = in_[el, :] + params[2:4] +@numba.njit(nogil=True, cache=True) +def _dexela_2923_distortion(out_, in_, params): + for el in range(len(in_)): + xi, yi = in_[el, :] + if xi < 0.: + if yi < 0.: + # 3rd quadrant + out_[el, :] = in_[el, :] + params[4:6] else: - if yi < 0.: - # 4th quadrant - out_[el, :] = in_[el, :] + params[6:8] - else: - # 1st quadrant - out_[el, :] = in_[el, :] + params[0:2] - - @numba.njit(nogil=True, cache=True) - def _dexela_2923_inverse_distortion(out_, in_, params): - for el in range(len(in_)): - xi, yi = in_[el, :] - if xi < 0.: - if yi < 0.: - # 3rd quadrant - out_[el, :] = in_[el, :] - params[4:6] - else: - # 2nd quadrant - out_[el, :] = in_[el, :] - params[2:4] + # 2nd quadrant + out_[el, :] = in_[el, :] + params[2:4] + else: + if yi < 0.: + # 4th quadrant + out_[el, :] = in_[el, :] + params[6:8] else: - if yi < 0.: - # 4th quadrant - out_[el, :] = in_[el, :] - params[6:8] - else: - # 1st quadrant - out_[el, :] = in_[el, :] - params[0:2] -else: - def _dexela_2923_distortion(out_, in_, params): - # find quadrant - ql = _find_quadrant(in_) - ql1 = ql == 1 - ql2 = ql == 2 - ql3 = ql == 3 - ql4 = ql == 4 - out_[ql1, :] = in_[ql1] + np.tile(params[0:2], (sum(ql1), 1)) - out_[ql2, :] = in_[ql2] + np.tile(params[2:4], (sum(ql2), 1)) - out_[ql3, :] = in_[ql3] + np.tile(params[4:6], (sum(ql3), 1)) - out_[ql4, :] = in_[ql4] + np.tile(params[6:8], (sum(ql4), 1)) - return - - def _dexela_2923_inverse_distortion(out_, in_, params): - ql = _find_quadrant(in_) - ql1 = ql == 1 - ql2 = ql == 2 - ql3 = ql == 3 - ql4 = ql == 4 - out_[ql1, :] = in_[ql1] - np.tile(params[0:2], (sum(ql1), 1)) - out_[ql2, :] = in_[ql2] - np.tile(params[2:4], (sum(ql2), 1)) - out_[ql3, :] = in_[ql3] - np.tile(params[4:6], (sum(ql3), 1)) - out_[ql4, :] = in_[ql4] - np.tile(params[6:8], (sum(ql4), 1)) - return - - + # 1st quadrant + out_[el, :] = in_[el, :] + params[0:2] + + +@numba.njit(nogil=True, cache=True) +def _dexela_2923_inverse_distortion(out_, in_, params): + for el in range(len(in_)): + xi, yi = in_[el, :] + if xi < 0.: + if yi < 0.: + # 3rd quadrant + out_[el, :] = in_[el, :] - params[4:6] + else: + # 2nd quadrant + out_[el, :] = in_[el, :] - params[2:4] + else: + if yi < 0.: + # 4th quadrant + out_[el, :] = in_[el, :] - params[6:8] + else: + # 1st quadrant + out_[el, :] = in_[el, :] - params[0:2] def test_disortion(): pts = np.random.randn(16, 2) diff --git a/hexrd/distortion/distortionabc.py b/hexrd/distortion/distortionabc.py index 0744b8783..f2eb2882a 100644 --- a/hexrd/distortion/distortionabc.py +++ b/hexrd/distortion/distortionabc.py @@ -8,9 +8,9 @@ class DistortionABC(metaclass=abc.ABCMeta): @abc.abstractmethod def apply(self, xy_in): """Apply distortion mapping""" - pass + raise NotImplementedError @abc.abstractmethod def apply_inverse(self, xy_in): """Apply inverse distortion mapping""" - pass + raise NotImplementedError diff --git a/hexrd/distortion/registry.py b/hexrd/distortion/registry.py index 1be2fb8a1..a36f2f7d0 
100644 --- a/hexrd/distortion/registry.py +++ b/hexrd/distortion/registry.py @@ -20,5 +20,3 @@ def register(cls, acls): """Register adapter class""" if acls.__name__ != 'DistortionBase': cls.distortion_registry[acls.maptype] = acls - - pass # end class diff --git a/hexrd/findorientations.py b/hexrd/findorientations.py index 820e2e0f3..b04fe4fd3 100755 --- a/hexrd/findorientations.py +++ b/hexrd/findorientations.py @@ -131,9 +131,6 @@ def generate_orientation_fibers(cfg, eta_ome): ome_c = eta_ome.omeEdges[0] + (0.5 + coms[i][ispot][0])*del_ome eta_c = eta_ome.etaEdges[0] + (0.5 + coms[i][ispot][1])*del_eta input_p.append(np.hstack([this_hkl, this_tth, eta_c, ome_c])) - pass - pass - pass # do the mapping start = timeit.default_timer() @@ -155,7 +152,6 @@ def generate_orientation_fibers(cfg, eta_ome): discretefiber_reduced, input_p, chunksize=chunksize ), total=ntotal ): - pass print(_.shape) ''' pool.close() @@ -352,8 +348,6 @@ def quat_distance(x, y): qbar[:, i] = rot.quatAverageCluster( qfib_r[:, cl == i + 1], qsym ).flatten() - pass - pass if algorithm in ('dbscan', 'ort-dbscan') and qbar.size/4 > 1: logger.info("\tchecking for duplicate orientations...") @@ -374,10 +368,7 @@ def quat_distance(x, y): tmp[:, i] = rot.quatAverageCluster( qbar[:, cl == i + 1].reshape(4, npts), qsym ).flatten() - pass qbar = tmp - pass - pass logger.info("clustering took %f seconds", timeit.default_timer() - start) logger.info( @@ -592,7 +583,7 @@ def generate_eta_ome_maps(cfg, hkls=None, save=True): map_fname ) - eta_ome.save(fn) + eta_ome.save_eta_ome_maps(fn) logger.info('saved eta/ome orientation maps to "%s"', fn) @@ -617,7 +608,7 @@ def _filter_eta_ome_maps(eta_ome, filter_stdev=False): """ gl_filter = ndimage.filters.gaussian_laplace - for i, pf in enumerate(eta_ome.dataStore): + for pf in eta_ome.dataStore: # first compoute row-wise median over omega channel ome_median = np.tile(np.nanmedian(pf, axis=0), (len(pf), 1)) @@ -894,7 +885,7 @@ def find_orientations(cfg, logger.info("\tmean reflections per grain: %d", mean_rpg) logger.info("\tneighborhood size: %d", min_samples) - qbar, cl = run_cluster( + qbar, _ = run_cluster( completeness, qfib, plane_data.getQSym(), cfg, min_samples=min_samples, compl_thresh=compl_thresh, diff --git a/hexrd/fitgrains.py b/hexrd/fitgrains.py index db53d387b..2afb3180e 100644 --- a/hexrd/fitgrains.py +++ b/hexrd/fitgrains.py @@ -184,8 +184,7 @@ def fit_grain_FF_reduced(grain_id): culled_results[det_key] = [presults[i] for i in np.where(idx)[0]] num_refl_tot += len(valid_refl_ids) num_refl_valid += sum(valid_refl_ids) - - pass # now we have culled data + # now we have culled data # CAVEAT: completeness from pullspots only; incl saturated and overlaps # @@ -220,8 +219,6 @@ def fit_grain_FF_reduced(grain_id): plane_data.latVecOps['B'], plane_data.wavelength, ome_period, simOnly=False, return_value_flag=2) - pass # end conditional on fit - pass # end tolerance looping if refit is not None: # first get calculated x, y, ome from previous solution @@ -279,7 +276,6 @@ def fit_grain_FF_reduced(grain_id): ] num_refl_valid += sum(idx_new) - pass # only execute fit if left with enough reflections if num_refl_valid > 12: @@ -297,8 +293,6 @@ def fit_grain_FF_reduced(grain_id): plane_data.latVecOps['B'], plane_data.wavelength, ome_period, simOnly=False, return_value_flag=2) - pass - pass # close refit conditional return grain_id, completeness, chisq, grain_params diff --git a/hexrd/fitting/calibration/calibrator.py b/hexrd/fitting/calibration/calibrator.py index 6264b1d14..de9634b67 
100644 --- a/hexrd/fitting/calibration/calibrator.py +++ b/hexrd/fitting/calibration/calibrator.py @@ -8,7 +8,7 @@ class Calibrator(ABC): @abstractmethod def type(self): """The type of the calibrator""" - pass + raise NotImplementedError @abstractmethod def create_lmfit_params(self, current_params): @@ -25,7 +25,7 @@ def create_lmfit_params(self, current_params): for the lattice parameters. The Laue calibrator creates lmfit parameters for crystal parameters. """ - pass + raise NotImplementedError @abstractmethod def update_from_lmfit_params(self, params_dict): @@ -38,7 +38,7 @@ def update_from_lmfit_params(self, params_dict): For example, the powder calibrator will update the lattice parameters on the material. The Laue calibrator will update crystal parameters. """ - pass + raise NotImplementedError @abstractmethod def residual(self, calibration_data=None): @@ -48,7 +48,7 @@ def residual(self, calibration_data=None): the calibration class instead, in which case, calibration_data can be `None`. """ - pass + raise NotImplementedError @property @abstractmethod @@ -73,10 +73,10 @@ def calibration_picks(self): string of the hkl. And "picks" are either a list of points (powder) or a single point (laue). The picks are in cartesian coordinates. """ - pass + raise NotImplementedError @calibration_picks.setter @abstractmethod def calibration_picks(self, val): """Setter for calibration_picks. See getter docs for details.""" - pass + raise NotImplementedError diff --git a/hexrd/fitting/calibration/laue.py b/hexrd/fitting/calibration/laue.py index 131d68bfe..5a6e4d835 100644 --- a/hexrd/fitting/calibration/laue.py +++ b/hexrd/fitting/calibration/laue.py @@ -209,7 +209,6 @@ def autopick_points(self, raw_img_dict, tth_tol=5., eta_tol=5., valid_hkls = hkls[gid][:, valid_refl] valid_angs = angles[gid][valid_refl, :] valid_energy = energy[gid][valid_refl] - # pass # make patches refl_patches = xrdutil.make_reflection_patches( @@ -291,7 +290,6 @@ def autopick_points(self, raw_img_dict, tth_tol=5., eta_tol=5., # else: closest_peak_idx = 0 - pass # end multipeak conditional # coms = coms[closest_peak_idx] # @@ -338,7 +336,6 @@ def autopick_points(self, raw_img_dict, tth_tol=5., eta_tol=5., gaussian_2d_int, [[0., 2.*y0], [0., 2.*x0]], args=fit_par) - pass com_angs = np.hstack([ tth_edges[0] + (0.5 + coms[1])*delta_tth, eta_edges[0] + (0.5 + coms[0])*delta_eta @@ -382,14 +379,12 @@ def autopick_points(self, raw_img_dict, tth_tol=5., eta_tol=5., # spot_intensity = np.nan max_intensity = np.nan - pass reflInfoList.append([peakId, valid_hkls[:, iRefl], (spot_intensity, max_intensity), valid_energy[iRefl], valid_angs[iRefl, :], meas_angs[iRefl, :], meas_xy[iRefl, :]]) - pass reflInfo = np.array( [tuple(i) for i in reflInfoList], dtype=reflInfo_dtype) diff --git a/hexrd/fitting/calibration/multigrain.py b/hexrd/fitting/calibration/multigrain.py index c677c625d..92dccb728 100644 --- a/hexrd/fitting/calibration/multigrain.py +++ b/hexrd/fitting/calibration/multigrain.py @@ -250,7 +250,6 @@ def sxcal_obj_func(plist_fit, plist_full, xy_unwarped[det_key][ig] = panel.distortion.apply( xy_unwarped[det_key][ig] ) - pass # transform G-vectors: # 1) convert inv. 
stretch tensor from MV notation in to 3x3 @@ -286,8 +285,6 @@ def sxcal_obj_func(plist_fit, plist_full, calc_omes[det_key].append(calc_omes_tmp) calc_xy[det_key].append(calc_xy_tmp) - pass - pass # return values if sim_only: @@ -311,7 +308,6 @@ def sxcal_obj_func(plist_fit, plist_full, calc_xy_all.append(np.vstack(calc_xy[det_key])) meas_omes_all.append(np.hstack(meas_omes[det_key])) calc_omes_all.append(np.hstack(calc_omes[det_key])) - pass meas_xy_all = np.vstack(meas_xy_all) calc_xy_all = np.vstack(calc_xy_all) meas_omes_all = np.hstack(meas_omes_all) diff --git a/hexrd/fitting/grains.py b/hexrd/fitting/grains.py index 659173aac..a407f8781 100644 --- a/hexrd/fitting/grains.py +++ b/hexrd/fitting/grains.py @@ -218,7 +218,6 @@ def objFuncFitGrain(gFit, gFull, gFlag, meas_omes = meas_xyo[:, 2] xy_unwarped = panel.distortion.apply(meas_xyo[:, :2]) meas_xyo = np.vstack([xy_unwarped.T, meas_omes]).T - pass # append to meas_omes meas_xyo_all.append(meas_xyo) @@ -251,7 +250,6 @@ def objFuncFitGrain(gFit, gFull, gFlag, # append to xy dict calc_xy_dict[det_key] = calc_xy - pass # stack results to concatenated arrays calc_omes_all = np.hstack([calc_omes_dict[k] for k in det_keys_ordered]) diff --git a/hexrd/fitting/peakfunctions.py b/hexrd/fitting/peakfunctions.py index 9da7d0da8..40d74c3dd 100644 --- a/hexrd/fitting/peakfunctions.py +++ b/hexrd/fitting/peakfunctions.py @@ -26,9 +26,9 @@ # ============================================================ import numpy as np +from numba import njit import copy from hexrd import constants -from hexrd.utils.decorators import numba_njit_if_available from hexrd.constants import \ c_erf, cnum_exp1exp, cden_exp1exp, c_coeff_exp1exp @@ -56,7 +56,7 @@ """ -@numba_njit_if_available(cache=True, nogil=True) +@njit(cache=True, nogil=True) def erfc(x): # save the sign of x sign = np.sign(x) @@ -80,7 +80,7 @@ def erfc(x): """ -@numba_njit_if_available(cache=True, nogil=True) +@njit(cache=True, nogil=True) def exp1exp_under1(x): f = np.zeros(x.shape).astype(np.complex128) for i in range(6): @@ -99,7 +99,7 @@ def exp1exp_under1(x): """ -@numba_njit_if_available(cache=True, nogil=True) +@njit(cache=True, nogil=True) def exp1exp_over1(x): num = np.zeros(x.shape).astype(np.complex128) den = np.zeros(x.shape).astype(np.complex128) @@ -117,7 +117,7 @@ def exp1exp_over1(x): return (num/den)*(1./x) -@numba_njit_if_available(cache=True, nogil=True) +@njit(cache=True, nogil=True) def exp1exp(x): mask = np.sign(x.real)*np.abs(x) > 1. 
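Reviewer note: the decorator swap in the hunks above (numba_njit_if_available -> njit) repeats across this PR now that numba is an unconditional dependency. A toy sketch of the resulting pattern; the kernel below is illustrative only, not code from the repo:

import numpy as np
from numba import njit

@njit(cache=True, nogil=True)
def _toy_kernel(x):
    # compiled on first call; cache=True persists the compiled function
    # across sessions, nogil=True releases the GIL inside the kernel
    return np.exp(-x * x)

print(_toy_kernel(np.linspace(-1.0, 1.0, 5)))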
@@ -457,19 +457,19 @@ def split_pvoigt1d(p, x): """ -@numba_njit_if_available(cache=True, nogil=True) +@njit(cache=True, nogil=True) def _calc_alpha(alpha, x0): a0, a1 = alpha return (a0 + a1*np.tan(np.radians(0.5*x0))) -@numba_njit_if_available(cache=True, nogil=True) +@njit(cache=True, nogil=True) def _calc_beta(beta, x0): b0, b1 = beta return b0 + b1*np.tan(np.radians(0.5*x0)) -@numba_njit_if_available(cache=True, nogil=True) +@njit(cache=True, nogil=True) def _mixing_factor_pv(fwhm_g, fwhm_l): """ @AUTHOR: Saransh Singh, Lawrence Livermore National Lab, @@ -499,7 +499,7 @@ def _mixing_factor_pv(fwhm_g, fwhm_l): return eta, fwhm -@numba_njit_if_available(nogil=True) +@njit(nogil=True) def _gaussian_pink_beam(p, x): """ @author Saransh Singh, Lawrence Livermore National Lab @@ -544,7 +544,7 @@ def _gaussian_pink_beam(p, x): return g -@numba_njit_if_available(nogil=True) +@njit(nogil=True) def _lorentzian_pink_beam(p, x): """ @author Saransh Singh, Lawrence Livermore National Lab @@ -579,7 +579,7 @@ def _lorentzian_pink_beam(p, x): return y -@numba_njit_if_available(nogil=True) +@njit(nogil=True) def _pink_beam_dcs_no_bg(p, x): """ @author Saransh Singh, Lawrence Livermore National Lab diff --git a/hexrd/fitting/utils.py b/hexrd/fitting/utils.py index 07c3cff60..47f72c953 100644 --- a/hexrd/fitting/utils.py +++ b/hexrd/fitting/utils.py @@ -1,12 +1,12 @@ import fnmatch import numpy as np +from numba import njit from hexrd.constants import ( c_erf, cnum_exp1exp, cden_exp1exp, c_coeff_exp1exp ) from hexrd.matrixutil import uniqueVectors -from hexrd.utils.decorators import numba_njit_if_available # ============================================================================= @@ -138,7 +138,7 @@ def _set_peak_center_bounds(params, window_range, min_sep=0.01): """ -@numba_njit_if_available(cache=True, nogil=True) +@njit(cache=True, nogil=True) def erfc(x): # save the sign of x sign = np.sign(x) @@ -162,7 +162,7 @@ def erfc(x): """ -@numba_njit_if_available(cache=True, nogil=True) +@njit(cache=True, nogil=True) def exp1exp_under1(x): f = np.zeros(x.shape).astype(np.complex128) for i in range(6): @@ -181,7 +181,7 @@ def exp1exp_under1(x): """ -@numba_njit_if_available(cache=True, nogil=True) +@njit(cache=True, nogil=True) def exp1exp_over1(x): num = np.zeros(x.shape).astype(np.complex128) den = np.zeros(x.shape).astype(np.complex128) @@ -199,7 +199,7 @@ def exp1exp_over1(x): return (num/den)*(1./x) -@numba_njit_if_available(cache=True, nogil=True) +@njit(cache=True, nogil=True) def exp1exp(x): mask = np.sign(x.real)*np.abs(x) > 1. 
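Reviewer note: a usage sketch for the new hexrd/deprecation.py introduced earlier in this diff; legacy_fit and its decorator arguments are hypothetical, for illustration only:

import os
from hexrd.deprecation import deprecated, DeprecatedFunctionError

@deprecated(new_func="fit_grains", removal_date="TBD")
def legacy_fit():
    return "ok"

try:
    legacy_fit()  # prints the replacement warning, then raises
except DeprecatedFunctionError as exc:
    print(exc)

os.environ["ACK_DEPRECATED"] = "true"
print(legacy_fit())  # warning prints again, but the call now goes through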
@@ -210,19 +210,19 @@ def exp1exp(x): return f -@numba_njit_if_available(cache=True, nogil=True) +@njit(cache=True, nogil=True) def _calc_alpha(alpha, x0): a0, a1 = alpha return (a0 + a1*np.tan(np.radians(0.5*x0))) -@numba_njit_if_available(cache=True, nogil=True) +@njit(cache=True, nogil=True) def _calc_beta(beta, x0): b0, b1 = beta return b0 + b1*np.tan(np.radians(0.5*x0)) -@numba_njit_if_available(cache=True, nogil=True) +@njit(cache=True, nogil=True) def _mixing_factor_pv(fwhm_g, fwhm_l): """ @AUTHOR: Saransh Singh, Lawrence Livermore National Lab, @@ -252,7 +252,7 @@ def _mixing_factor_pv(fwhm_g, fwhm_l): return eta, fwhm -@numba_njit_if_available(nogil=True) +@njit(nogil=True) def _gaussian_pink_beam(p, x): """ @author Saransh Singh, Lawrence Livermore National Lab @@ -298,7 +298,7 @@ def _gaussian_pink_beam(p, x): return g -@numba_njit_if_available(nogil=True) +@njit(nogil=True) def _lorentzian_pink_beam(p, x): """ @author Saransh Singh, Lawrence Livermore National Lab diff --git a/hexrd/grainmap/nfutil.py b/hexrd/grainmap/nfutil.py index cd79576a0..23e335fbe 100644 --- a/hexrd/grainmap/nfutil.py +++ b/hexrd/grainmap/nfutil.py @@ -58,7 +58,6 @@ logging.info(f'{rank=} {world_size=} {hostname=}') except ImportError: logging.warning(f'mpi4py failed to load on {hostname=}. MPI is disabled.') - pass def load_instrument(yml): diff --git a/hexrd/gridutil.py b/hexrd/gridutil.py index f9c5cb70f..5958cdfe4 100644 --- a/hexrd/gridutil.py +++ b/hexrd/gridutil.py @@ -27,10 +27,10 @@ # ============================================================================= import numpy as np from numpy.linalg import det +import numba + +from hexrd.constants import sqrt_epsf -from hexrd.constants import USE_NUMBA, sqrt_epsf -if USE_NUMBA: - import numba def cellIndices(edges, points_1d): @@ -90,12 +90,13 @@ def cellIndices(edges, points_1d): return np.array(idx, dtype=int) +@numba.njit(nogil=True, cache=True) def _fill_connectivity(out, m, n, p): i_con = 0 for k in range(p): + extra = k*(n+1)*(m+1) for j in range(m): for i in range(n): - extra = k*(n+1)*(m+1) out[i_con, 0] = i + j*(n + 1) + 1 + extra out[i_con, 1] = i + j*(n + 1) + extra out[i_con, 2] = i + j + n*(j+1) + 1 + extra @@ -103,10 +104,6 @@ def _fill_connectivity(out, m, n, p): i_con += 1 -if USE_NUMBA: - _fill_connectivity = numba.njit(nogil=True, cache=True)(_fill_connectivity) - - def cellConnectivity(m, n, p=1, origin='ul'): """ p x m x n (layers x rows x cols) @@ -122,73 +119,47 @@ def cellConnectivity(m, n, p=1, origin='ul'): if p > 1: nele = m*n*(p-1) - tmp_con3 = con.reshape(p, m*n, 4) + tmp_con3 = con.reshape((p, m*n, 4)) hex_con = [] for layer in range(p - 1): hex_con.append(np.hstack([tmp_con3[layer], tmp_con3[layer + 1]])) con = np.vstack(hex_con) - pass if origin.lower().strip() == 'll': con = con[:, ::-1] return con -if USE_NUMBA: - @numba.njit(nogil=True, cache=True) # relies on loop extraction - def cellCentroids(crd, con): - nele, conn_count = con.shape - dim = crd.shape[1] - out = np.empty((nele, dim)) - inv_conn = 1.0/conn_count - for i in range(nele): - for j in range(dim): - acc = 0.0 - for k in range(conn_count): - acc += crd[con[i, k], j] - out[i, j] = acc * inv_conn - return out - - @numba.njit(nogil=True, cache=True) - def compute_areas(xy_eval_vtx, conn): - areas = np.empty(len(conn)) - for i in range(len(conn)): - c0, c1, c2, c3 = conn[i] - vtx0x, vtx0y = xy_eval_vtx[conn[i, 0]] - vtx1x, vtx1y = xy_eval_vtx[conn[i, 1]] - v0x, v0y = vtx1x-vtx0x, vtx1y-vtx0y - acc = 0 - for j in range(2, 4): - vtx_x, vtx_y = 
xy_eval_vtx[conn[i, j]] - v1x = vtx_x - vtx0x - v1y = vtx_y - vtx0y - acc += v0x*v1y - v1x*v0y - - areas[i] = 0.5 * acc - return areas -else: - def cellCentroids(crd, con): - """ - con.shape = (nele, 4) - crd.shape = (ncrd, 2) - - con.shape = (nele, 8) - crd.shape = (ncrd, 3) - """ - nele = con.shape[0] - dim = crd.shape[1] - centroid_xy = np.zeros((nele, dim)) - for i in range(len(con)): - el_crds = crd[con[i, :], :] # (4, 2) - centroid_xy[i, :] = (el_crds).mean(axis=0) - return centroid_xy - - def compute_areas(xy_eval_vtx, conn): - areas = np.zeros(len(conn)) - for i in range(len(conn)): - polygon = [[xy_eval_vtx[conn[i, j], 0], - xy_eval_vtx[conn[i, j], 1]] for j in range(4)] - areas[i] = computeArea(polygon) - return areas +@numba.njit(nogil=True, cache=True) # relies on loop extraction +def cellCentroids(crd, con): + nele, conn_count = con.shape + dim = crd.shape[1] + out = np.empty((nele, dim)) + inv_conn = 1.0/conn_count + for i in range(nele): + for j in range(dim): + acc = 0.0 + for k in range(conn_count): + acc += crd[con[i, k], j] + out[i, j] = acc * inv_conn + return out + + +@numba.njit(nogil=True, cache=True) +def compute_areas(xy_eval_vtx, conn): + areas = np.empty(len(conn)) + for i in range(len(conn)): + vtx0x, vtx0y = xy_eval_vtx[conn[i, 0]] + vtx1x, vtx1y = xy_eval_vtx[conn[i, 1]] + v0x, v0y = vtx1x-vtx0x, vtx1y-vtx0y + acc = 0 + for j in range(2, 4): + vtx_x, vtx_y = xy_eval_vtx[conn[i, j]] + v1x = vtx_x - vtx0x + v1y = vtx_y - vtx0y + acc += v0x*v1y - v1x*v0y + + areas[i] = 0.5 * acc + return areas def computeArea(polygon): @@ -201,24 +172,23 @@ def computeArea(polygon): triv = np.array([[[0, i - 1], [0, i]] for i in range(2, n_vertices)]) area = 0 - for i in range(len(triv)): + for [s1, s2] in triv: tvp = np.diff( - np.hstack([polygon[triv[i][0], :], - polygon[triv[i][1], :]]), axis=0).flatten() + np.hstack([polygon[s1, :], + polygon[s2, :]]), axis=0).flatten() area += 0.5 * np.cross(tvp[:2], tvp[2:]) return area def make_tolerance_grid(bin_width, window_width, num_subdivisions, adjust_window=False, one_sided=False): - if bin_width > window_width: - bin_width = window_width + bin_width = min(bin_width, window_width) if adjust_window: window_width = np.ceil(window_width/bin_width)*bin_width if one_sided: ndiv = abs(int(window_width/bin_width)) grid = (np.arange(0, 2*ndiv+1) - ndiv)*bin_width - ndiv = 2*ndiv + ndiv *= 2 else: ndiv = int(num_subdivisions*np.ceil(window_width/float(bin_width))) grid = np.arange(0, ndiv+1)*window_width/float(ndiv) - 0.5*window_width @@ -228,46 +198,33 @@ def make_tolerance_grid(bin_width, window_width, num_subdivisions, def computeIntersection(line1, line2): """ compute intersection of two-dimensional line intersection + Returns the intersection point as an array of length 2. + If the lines are parallel (or equal) the function returns an empty array. 
this is an implementation of two lines: - line1 = [ [x0, y0], [x1, y1] ] - line1 = [ [x3, y3], [x4, y4] ] + line1 = [ [x1, y1], [x2, y2] ] + line2 = [ [x3, y3], [x4, y4] ] """ intersection = np.zeros(2) - l1 = np.array(line1) - l2 = np.array(line2) + [x1, y1] = line1[0] + [x2, y2] = line1[1] + [x3, y3] = line2[0] + [x4, y4] = line2[1] - det_l1 = det(l1) - det_l2 = det(l2) + denom = (x1-x2)*(y3-y4) - (y1-y2)*(x3-x4) + if denom == 0: + return [] - det_l1_x = det(np.vstack([l1[:, 0], np.ones(2)]).T) - det_l1_y = det(np.vstack([l1[:, 1], np.ones(2)]).T) + subterm1 = x1*y2 - y1*x2 + subterm2 = x3*y4 - y3*x4 - det_l2_x = det(np.vstack([l2[:, 0], np.ones(2)]).T) - det_l2_y = det(np.vstack([l2[:, 1], np.ones(2)]).T) - - denominator = det( - np.vstack([[det_l1_x, det_l1_y], [det_l2_x, det_l2_y]]) - ) - - if denominator == 0: - intersection = [] - else: - intersection[0] = det( - np.vstack( - [[det_l1, det_l1_x], [det_l2, det_l2_x]] - ) - ) / denominator - intersection[1] = det( - np.vstack( - [[det_l1, det_l1_y], [det_l2, det_l2_y]] - ) - ) / denominator + intersection[0] = (subterm1*(x3-x4) - subterm2*(x1-x2)) / denom + intersection[1] = (subterm1*(y3-y4) - subterm2*(y1-y2)) / denom return intersection @@ -295,6 +252,7 @@ def isinside(point, boundary, ccw=True): def sutherlandHodgman(subjectPolygon, clipPolygon): """ + https://en.wikipedia.org/wiki/Sutherland%E2%80%93Hodgman_algorithm """ subjectPolygon = np.array(subjectPolygon) clipPolygon = np.array(clipPolygon) @@ -332,7 +290,6 @@ def sutherlandHodgman(subjectPolygon, clipPolygon): outputList.append( computeIntersection(subjectLineSegment, clipBoundary) ) - pass outputList.append(curr_subjectVertex) elif isinside(prev_subjectVertex, clipBoundary): subjectLineSegment = np.vstack( @@ -341,9 +298,6 @@ def sutherlandHodgman(subjectPolygon, clipPolygon): outputList.append( computeIntersection(subjectLineSegment, clipBoundary) ) - pass prev_subjectVertex = curr_subjectVertex prev_clipVertex = curr_clipVertex - pass - pass return outputList diff --git a/hexrd/imageseries/baseclass.py b/hexrd/imageseries/baseclass.py index a99e0e490..729c8af37 100644 --- a/hexrd/imageseries/baseclass.py +++ b/hexrd/imageseries/baseclass.py @@ -39,5 +39,3 @@ def shape(self): @property def metadata(self): return self._adapter.metadata - - pass # end class diff --git a/hexrd/imageseries/load/framecache.py b/hexrd/imageseries/load/framecache.py index 20fbd632a..b6435942d 100644 --- a/hexrd/imageseries/load/framecache.py +++ b/hexrd/imageseries/load/framecache.py @@ -135,5 +135,3 @@ def __iter__(self): #@memoize def __len__(self): return self._nframes - - pass # end class diff --git a/hexrd/imageseries/load/hdf5.py b/hexrd/imageseries/load/hdf5.py index 074c9f7b9..c573531e6 100644 --- a/hexrd/imageseries/load/hdf5.py +++ b/hexrd/imageseries/load/hdf5.py @@ -55,7 +55,6 @@ def __del__(self): self.close() except(Exception): warnings.warn("HDF5ImageSeries could not close h5 file") - pass def __getitem__(self, key): if self._ndim == 2: @@ -138,5 +137,3 @@ def shape(self): return self.__image_dataset.shape else: return self.__image_dataset.shape[1:] - - pass # end class diff --git a/hexrd/imageseries/load/imagefiles.py b/hexrd/imageseries/load/imagefiles.py index e49628515..532d2c006 100644 --- a/hexrd/imageseries/load/imagefiles.py +++ b/hexrd/imageseries/load/imagefiles.py @@ -234,8 +234,6 @@ def _load_data(self, filename, frame=None): return _process_data(filename, data) - pass # end class - class FileInfo(object): """class for managing individual file information""" diff 
--git a/hexrd/imageseries/load/registry.py b/hexrd/imageseries/load/registry.py index 3bcb1270e..f87315f45 100644 --- a/hexrd/imageseries/load/registry.py +++ b/hexrd/imageseries/load/registry.py @@ -9,5 +9,3 @@ def register(cls, acls): """Register adapter class""" if acls.__name__ != 'ImageSeriesAdapter': cls.adapter_registry[acls.format] = acls - - pass # end class diff --git a/hexrd/imageseries/omega.py b/hexrd/imageseries/omega.py index 7875a9f3b..f0a61ef2d 100644 --- a/hexrd/imageseries/omega.py +++ b/hexrd/imageseries/omega.py @@ -213,8 +213,6 @@ def save_omegas(self, fname): """ np.save(fname, self.omegas) - pass # end class - class OmegaSeriesError(Exception): def __init__(self, value): diff --git a/hexrd/imageseries/process.py b/hexrd/imageseries/process.py index c2101fc79..f95835b41 100644 --- a/hexrd/imageseries/process.py +++ b/hexrd/imageseries/process.py @@ -146,5 +146,3 @@ def addop(self, key, func): def oplist(self): """list of operations to apply""" return self._oplist - - pass # end class diff --git a/hexrd/imageseries/save.py b/hexrd/imageseries/save.py index 187b5fc67..092d30aa6 100644 --- a/hexrd/imageseries/save.py +++ b/hexrd/imageseries/save.py @@ -62,8 +62,6 @@ def register(cls, wcls): def getwriter(cls, name): """return instance associated with name""" return cls.writer_registry[name] - # - pass # end class class Writer(object, metaclass=_RegisterWriter): @@ -193,8 +191,6 @@ def h5opts(self): return d - pass # end class - class WriteFrameCache(Writer): """write frame cache imageseries diff --git a/hexrd/indexer.py b/hexrd/indexer.py index f38ed3a9f..b3c9ebb5c 100644 --- a/hexrd/indexer.py +++ b/hexrd/indexer.py @@ -32,19 +32,18 @@ import multiprocessing import numpy as np +import numba import timeit from hexrd import constants from hexrd.transforms import xfcapi -from hexrd.constants import USE_NUMBA -if USE_NUMBA: - import numba + # ============================================================================= # Parameters # ============================================================================= -omega_period_DFLT = np.radians(np.r_[-180., 180.]) +omega_period_DFLT = np.radians(np.r_[-180.0, 180.0]) paramMP = None nCPUs_DFLT = multiprocessing.cpu_count() @@ -54,13 +53,20 @@ # ============================================================================= # Methods # ============================================================================= -def paintGrid(quats, etaOmeMaps, - threshold=None, bMat=None, - omegaRange=None, etaRange=None, - omeTol=constants.d2r, etaTol=constants.d2r, - omePeriod=omega_period_DFLT, - doMultiProc=False, - nCPUs=None, debug=False): +def paintGrid( + quats, + etaOmeMaps, + threshold=None, + bMat=None, + omegaRange=None, + etaRange=None, + omeTol=constants.d2r, + etaTol=constants.d2r, + omePeriod=omega_period_DFLT, + doMultiProc=False, + nCPUs=None, + debug=False, +): r""" Spherical map-based indexing algorithm, i.e. paintGrid. @@ -157,10 +163,7 @@ def paintGrid(quats, etaOmeMaps, # !!! 
these are master hklIDs hklIDs = np.asarray(etaOmeMaps.iHKLList) hklList = planeData.getHKLs(*hklIDs).tolist() - hkl_idx = planeData.getHKLID( - planeData.getHKLs(*hklIDs).T, - master=False - ) + hkl_idx = planeData.getHKLID(planeData.getHKLs(*hklIDs).T, master=False) nHKLS = len(hklIDs) numEtas = len(etaOmeMaps.etaEdges) - 1 @@ -170,8 +173,10 @@ def paintGrid(quats, etaOmeMaps, threshold = np.zeros(nHKLS) for i in range(nHKLS): threshold[i] = np.mean( - np.r_[np.mean(etaOmeMaps.dataStore[i]), - np.median(etaOmeMaps.dataStore[i])] + np.r_[ + np.mean(etaOmeMaps.dataStore[i]), + np.median(etaOmeMaps.dataStore[i]), + ] ) elif threshold is not None and not hasattr(threshold, '__len__'): threshold = threshold * np.ones(nHKLS) @@ -183,7 +188,7 @@ def paintGrid(quats, etaOmeMaps, else: raise RuntimeError( "unknown threshold option. should be a list of numbers or None" - ) + ) if bMat is None: bMat = planeData.latVecOps['B'] @@ -195,14 +200,22 @@ def paintGrid(quats, etaOmeMaps, omeMin = None omeMax = None if omegaRange is None: # FIXME - omeMin = [np.min(etaOmeMaps.omeEdges), ] - omeMax = [np.max(etaOmeMaps.omeEdges), ] + omeMin = [ + np.min(etaOmeMaps.omeEdges), + ] + omeMax = [ + np.max(etaOmeMaps.omeEdges), + ] else: omeMin = [omegaRange[i][0] for i in range(len(omegaRange))] omeMax = [omegaRange[i][1] for i in range(len(omegaRange))] if omeMin is None: - omeMin = [-np.pi, ] - omeMax = [np.pi, ] + omeMin = [ + -np.pi, + ] + omeMax = [ + np.pi, + ] omeMin = np.asarray(omeMin) omeMax = np.asarray(omeMax) @@ -212,8 +225,12 @@ def paintGrid(quats, etaOmeMaps, etaMin = [etaRange[i][0] for i in range(len(etaRange))] etaMax = [etaRange[i][1] for i in range(len(etaRange))] if etaMin is None: - etaMin = [-np.pi, ] - etaMax = [np.pi, ] + etaMin = [ + -np.pi, + ] + etaMax = [ + np.pi, + ] etaMin = np.asarray(etaMin) etaMax = np.asarray(etaMax) @@ -224,8 +241,9 @@ def paintGrid(quats, etaOmeMaps, chunksize = min(quats.shape[1] // nCPUs, 10) logger.info( "using multiprocessing with %d processes and a chunk size of %d", - nCPUs, chunksize - ) + nCPUs, + chunksize, + ) else: logger.info("running in serial mode") nCPUs = 1 @@ -259,24 +277,24 @@ def paintGrid(quats, etaOmeMaps, 'etaEdges': etaOmeMaps.etaEdges, 'etaOmeMaps': np.stack(etaOmeMaps.dataStore), 'bMat': bMat, - 'threshold': np.asarray(threshold) - } + 'threshold': np.asarray(threshold), + } # do the mapping start = timeit.default_timer() retval = None if multiProcMode: # multiple process version - pool = multiprocessing.Pool(nCPUs, paintgrid_init, (params, )) + pool = multiprocessing.Pool(nCPUs, paintgrid_init, (params,)) retval = pool.map(paintGridThis, quats.T, chunksize=chunksize) pool.close() else: # single process version. 
global paramMP - paintgrid_init(params) # sets paramMP + paintgrid_init(params) # sets paramMP retval = list(map(paintGridThis, quats.T)) - paramMP = None # clear paramMP - elapsed = (timeit.default_timer() - start) + paramMP = None # clear paramMP + elapsed = timeit.default_timer() - start logger.info("paintGrid took %.3f seconds", elapsed) return retval @@ -357,18 +375,19 @@ def _normalize_ranges(starts, stops, offset, ccw=False): # return the full range two_pi = 2 * np.pi if np.any((starts + two_pi) < stops + 1e-8): - return np.array([offset, two_pi+offset]) + return np.array([offset, two_pi + offset]) starts = np.mod(starts - offset, two_pi) + offset stops = np.mod(stops - offset, two_pi) + offset order = np.argsort(starts) - result = np.hstack((starts[order, np.newaxis], - stops[order, np.newaxis])).ravel() + result = np.hstack( + (starts[order, np.newaxis], stops[order, np.newaxis]) + ).ravel() # at this point, result is in its final form unless there # is wrap-around in the last segment. Handle this case: if result[-1] < result[-2]: - new_result = np.empty((len(result)+2,), dtype=result.dtype) + new_result = np.empty((len(result) + 2,), dtype=result.dtype) new_result[0] = offset new_result[1] = result[-1] new_result[2:-1] = result[0:-1] @@ -403,13 +422,13 @@ def paintgrid_init(params): # instead of building etaMin/etaMax and omeMin/omeMax. It may also # be worth handling range overlap and maybe "optimize" ranges if # there happens to be contiguous spans. - paramMP['valid_eta_spans'] = _normalize_ranges(paramMP['etaMin'], - paramMP['etaMax'], - -np.pi) + paramMP['valid_eta_spans'] = _normalize_ranges( + paramMP['etaMin'], paramMP['etaMax'], -np.pi + ) - paramMP['valid_ome_spans'] = _normalize_ranges(paramMP['omeMin'], - paramMP['omeMax'], - min(paramMP['omePeriod'])) + paramMP['valid_ome_spans'] = _normalize_ranges( + paramMP['omeMin'], paramMP['omeMax'], min(paramMP['omePeriod']) + ) return @@ -423,6 +442,9 @@ def paintgrid_init(params): # There is a version of PaintGridThis using numba, and another version used # when numba is not available. The numba version should be noticeably faster. ############################################################################### + + +@numba.njit(nogil=True, cache=True) def _check_dilated(eta, ome, dpix_eta, dpix_ome, etaOmeMap, threshold): """Part of paintGridThis. @@ -441,11 +463,11 @@ def _check_dilated(eta, ome, dpix_eta, dpix_ome, etaOmeMap, threshold): i_max, j_max = etaOmeMap.shape ome_start, ome_stop = ( max(ome - dpix_ome, 0), - min(ome + dpix_ome + 1, i_max) + min(ome + dpix_ome + 1, i_max), ) eta_start, eta_stop = ( max(eta - dpix_eta, 0), - min(eta + dpix_eta + 1, j_max) + min(eta + dpix_eta + 1, j_max), ) for i in range(ome_start, ome_stop): @@ -457,406 +479,267 @@ def _check_dilated(eta, ome, dpix_eta, dpix_ome, etaOmeMap, threshold): return 0 -if USE_NUMBA: - def paintGridThis(quat): - """Single instance paintGrid call. - - Note that this version does not use omeMin/omeMax to specify the valid - angles. It uses "valid_eta_spans" and "valid_ome_spans". 
These are - precomputed and make for a faster check of ranges than - "validateAngleRanges" - """ - symHKLs = paramMP['symHKLs'] # the HKLs - symHKLs_ix = paramMP['symHKLs_ix'] # index partitioning of symHKLs - bMat = paramMP['bMat'] - wavelength = paramMP['wavelength'] - omeEdges = paramMP['omeEdges'] - omeTol = paramMP['omeTol'] - omePeriod = paramMP['omePeriod'] - valid_eta_spans = paramMP['valid_eta_spans'] - valid_ome_spans = paramMP['valid_ome_spans'] - omeIndices = paramMP['omeIndices'] - etaEdges = paramMP['etaEdges'] - etaTol = paramMP['etaTol'] - etaIndices = paramMP['etaIndices'] - etaOmeMaps = paramMP['etaOmeMaps'] - threshold = paramMP['threshold'] - - # dpix_ome and dpix_eta are the number of pixels for the tolerance in - # ome/eta. Maybe we should compute this per run instead of per - # quaternion - del_ome = abs(omeEdges[1] - omeEdges[0]) - del_eta = abs(etaEdges[1] - etaEdges[0]) - dpix_ome = int(round(omeTol / del_ome)) - dpix_eta = int(round(etaTol / del_eta)) - - # FIXME - debug = False - if debug: - print( - "using ome, eta dilitations of (%d, %d) pixels" - % (dpix_ome, dpix_eta) - ) +def paintGridThis(quat): + """Single instance paintGrid call. - # get the equivalent rotation of the quaternion in matrix form (as - # expected by oscillAnglesOfHKLs - - rMat = xfcapi.makeRotMatOfQuat(quat) - - # Compute the oscillation angles of all the symHKLs at once - oangs_pair = xfcapi.oscillAnglesOfHKLs(symHKLs, 0., rMat, bMat, - wavelength) - # pdb.set_trace() - return _filter_and_count_hits(oangs_pair[0], oangs_pair[1], symHKLs_ix, - etaEdges, valid_eta_spans, - valid_ome_spans, omeEdges, omePeriod, - etaOmeMaps, etaIndices, omeIndices, - dpix_eta, dpix_ome, threshold) - - @numba.njit(nogil=True, cache=True) - def _find_in_range(value, spans): - """ - Find the index in spans where value >= spans[i] and value < spans[i]. - - spans is an ordered array where spans[i] <= spans[i+1] - (most often < will hold). - - If value is not in the range [spans[0], spans[-1]], then - -2 is returned. - - This is equivalent to "bisect_right" in the bisect package, in which - code it is based, and it is somewhat similar to NumPy's searchsorted, - but non-vectorized - """ - if value < spans[0] or value >= spans[-1]: - return -2 - - # from the previous check, we know 0 is not a possible result - li = 0 - ri = len(spans) - - while li < ri: - mi = (li + ri) // 2 - if value < spans[mi]: - ri = mi - else: - li = mi+1 - - return li - - @numba.njit(nogil=True, cache=True) - def _angle_is_hit(ang, eta_offset, ome_offset, hkl, valid_eta_spans, - valid_ome_spans, etaEdges, omeEdges, etaOmeMaps, - etaIndices, omeIndices, dpix_eta, dpix_ome, threshold): - """Perform work on one of the angles. - - This includes: - - - filtering nan values - - - filtering out angles not in the specified spans - - - checking that the discretized angle fits into the sensor range (maybe - this could be merged with the previous test somehow, for extra speed) - - - actual check for a hit, using dilation for the tolerance. - - Note the function returns both, if it was a hit and if it passed the - filtering, as we'll want to discard the filtered values when computing - the hit percentage. - - CAVEAT: added map-based nan filtering to _check_dilated; this may not - be the best option. Perhaps filter here? 
- - """ - tth, eta, ome = ang - - if np.isnan(tth): - return 0, 0 - - eta = _map_angle(eta, eta_offset) - if _find_in_range(eta, valid_eta_spans) & 1 == 0: - # index is even: out of valid eta spans - return 0, 0 - - ome = _map_angle(ome, ome_offset) - if _find_in_range(ome, valid_ome_spans) & 1 == 0: - # index is even: out of valid ome spans - return 0, 0 - - # discretize the angles - eta_idx = _find_in_range(eta, etaEdges) - 1 - if eta_idx < 0: - # out of range - return 0, 0 - - ome_idx = _find_in_range(ome, omeEdges) - 1 - if ome_idx < 0: - # out of range - return 0, 0 - - eta = etaIndices[eta_idx] - ome = omeIndices[ome_idx] - isHit = _check_dilated(eta, ome, dpix_eta, dpix_ome, - etaOmeMaps[hkl], threshold[hkl]) - if isHit == -1: - return 0, 0 + Note that this version does not use omeMin/omeMax to specify the valid + angles. It uses "valid_eta_spans" and "valid_ome_spans". These are + precomputed and make for a faster check of ranges than + "validateAngleRanges" + """ + symHKLs = paramMP['symHKLs'] # the HKLs + symHKLs_ix = paramMP['symHKLs_ix'] # index partitioning of symHKLs + bMat = paramMP['bMat'] + wavelength = paramMP['wavelength'] + omeEdges = paramMP['omeEdges'] + omeTol = paramMP['omeTol'] + omePeriod = paramMP['omePeriod'] + valid_eta_spans = paramMP['valid_eta_spans'] + valid_ome_spans = paramMP['valid_ome_spans'] + omeIndices = paramMP['omeIndices'] + etaEdges = paramMP['etaEdges'] + etaTol = paramMP['etaTol'] + etaIndices = paramMP['etaIndices'] + etaOmeMaps = paramMP['etaOmeMaps'] + threshold = paramMP['threshold'] + + # dpix_ome and dpix_eta are the number of pixels for the tolerance in + # ome/eta. Maybe we should compute this per run instead of per + # quaternion + del_ome = abs(omeEdges[1] - omeEdges[0]) + del_eta = abs(etaEdges[1] - etaEdges[0]) + dpix_ome = int(round(omeTol / del_ome)) + dpix_eta = int(round(etaTol / del_eta)) + + # FIXME + debug = False + if debug: + print( + "using ome, eta dilitations of (%d, %d) pixels" + % (dpix_ome, dpix_eta) + ) + + # get the equivalent rotation of the quaternion in matrix form (as + # expected by oscillAnglesOfHKLs + + rMat = xfcapi.makeRotMatOfQuat(quat) + + # Compute the oscillation angles of all the symHKLs at once + oangs_pair = xfcapi.oscillAnglesOfHKLs( + symHKLs, 0.0, rMat, bMat, wavelength + ) + # pdb.set_trace() + return _filter_and_count_hits( + oangs_pair[0], + oangs_pair[1], + symHKLs_ix, + etaEdges, + valid_eta_spans, + valid_ome_spans, + omeEdges, + omePeriod, + etaOmeMaps, + etaIndices, + omeIndices, + dpix_eta, + dpix_ome, + threshold, + ) + + +@numba.njit(nogil=True, cache=True) +def _find_in_range(value, spans): + """ + Find the index in spans where value >= spans[i] and value < spans[i]. + + spans is an ordered array where spans[i] <= spans[i+1] + (most often < will hold). + + If value is not in the range [spans[0], spans[-1]], then + -2 is returned. 
+
+    This is equivalent to "bisect_right" in the bisect package, on which
+    this code is based, and it is somewhat similar to NumPy's searchsorted,
+    but non-vectorized
+    """
+    if value < spans[0] or value >= spans[-1]:
+        return -2
+
+    # from the previous check, we know 0 is not a possible result
+    li = 0
+    ri = len(spans)
+
+    while li < ri:
+        mi = (li + ri) // 2
+        if value < spans[mi]:
+            ri = mi
         else:
-            return isHit, 1
-
-    @numba.njit(nogil=True, cache=True)
-    def _filter_and_count_hits(angs_0, angs_1, symHKLs_ix, etaEdges,
-                               valid_eta_spans, valid_ome_spans, omeEdges,
-                               omePeriod, etaOmeMaps, etaIndices, omeIndices,
-                               dpix_eta, dpix_ome, threshold):
-        """Accumulate completeness scores.
-
-        assumes:
-        we want etas in -pi -> pi range
-        we want omes in ome_offset -> ome_offset + 2*pi range
-
-        Instead of creating an array with the angles of angs_0 and angs_1
-        interleaved, in this numba version calls for both arrays are performed
-        getting the angles from angs_0 and angs_1. this is done in this way to
-        reuse hkl computation. This may not be that important, though.
-
-        """
-        eta_offset = -np.pi
-        ome_offset = np.min(omePeriod)
-        hits = 0
-        total = 0
-        curr_hkl_idx = 0
-        end_curr = symHKLs_ix[1]
-        count = len(angs_0)
-
-        for i in range(count):
-            if i >= end_curr:
-                curr_hkl_idx += 1
-                end_curr = symHKLs_ix[curr_hkl_idx+1]
-
-            # first solution
-            hit, not_filtered = _angle_is_hit(
-                angs_0[i], eta_offset, ome_offset,
-                curr_hkl_idx, valid_eta_spans,
-                valid_ome_spans, etaEdges,
-                omeEdges, etaOmeMaps, etaIndices,
-                omeIndices, dpix_eta, dpix_ome,
-                threshold)
-            hits += hit
-            total += not_filtered
-
-            # second solution
-            hit, not_filtered = _angle_is_hit(
-                angs_1[i], eta_offset, ome_offset,
-                curr_hkl_idx, valid_eta_spans,
-                valid_ome_spans, etaEdges,
-                omeEdges, etaOmeMaps, etaIndices,
-                omeIndices, dpix_eta, dpix_ome,
-                threshold)
-            hits += hit
-            total += not_filtered
-
-        return float(hits)/float(total) if total != 0 else 0.0
-
-    @numba.njit(nogil=True, cache=True)
-    def _map_angle(angle, offset):
-        """Numba-firendly equivalent to xf.mapAngle."""
-        return np.mod(angle-offset, 2*np.pi)+offset
-
-    # use a jitted version of _check_dilated
-    _check_dilated = numba.njit(nogil=True, cache=True)(_check_dilated)
-else:
-    def paintGridThis(quat):
-        """
-        Single instance completeness test.
-
-        Parameters
-        ----------
-        quat : (4,) array_like
-            DESCRIPTION.
-
-        Returns
-        -------
-        retval : float
-            DESCRIPTION.
-
-        """
-        # unmarshall parameters into local variables
-        symHKLs = paramMP['symHKLs']  # the HKLs
-        symHKLs_ix = paramMP['symHKLs_ix']  # index partitioning of symHKLs
-        bMat = paramMP['bMat']
-        wavelength = paramMP['wavelength']
-        omeEdges = paramMP['omeEdges']
-        omeTol = paramMP['omeTol']
-        omePeriod = paramMP['omePeriod']
-        valid_eta_spans = paramMP['valid_eta_spans']
-        valid_ome_spans = paramMP['valid_ome_spans']
-        omeIndices = paramMP['omeIndices']
-        etaEdges = paramMP['etaEdges']
-        etaTol = paramMP['etaTol']
-        etaIndices = paramMP['etaIndices']
-        etaOmeMaps = paramMP['etaOmeMaps']
-        threshold = paramMP['threshold']
-
-        # dpix_ome and dpix_eta are the number of pixels for the tolerance in
-        # ome/eta.
Maybe we should compute this per run instead of - # per-quaternion - del_ome = abs(omeEdges[1] - omeEdges[0]) - del_eta = abs(etaEdges[1] - etaEdges[0]) - dpix_ome = int(round(omeTol / del_ome)) - dpix_eta = int(round(etaTol / del_eta)) - - debug = False - if debug: - print("using ome, eta dilitations of (%d, %d) pixels" - % (dpix_ome, dpix_eta)) - - # get the equivalent rotation of the quaternion in matrix form (as - # expected by oscillAnglesOfHKLs - - rMat = xfcapi.makeRotMatOfQuat(quat) - - # Compute the oscillation angles of all the symHKLs at once - oangs_pair = xfcapi.oscillAnglesOfHKLs(symHKLs, 0., rMat, bMat, - wavelength) - hkl_idx, eta_idx, ome_idx = _filter_angs(oangs_pair[0], oangs_pair[1], - symHKLs_ix, etaEdges, - valid_eta_spans, omeEdges, - valid_ome_spans, omePeriod) - - if len(hkl_idx > 0): - hits, predicted = _count_hits( - eta_idx, ome_idx, hkl_idx, etaOmeMaps, - etaIndices, omeIndices, dpix_eta, dpix_ome, - threshold) - retval = float(hits) / float(predicted) - if retval > 1: - import pdb - pdb.set_trace() - return retval - - def _normalize_angs_hkls(angs_0, angs_1, omePeriod, symHKLs_ix): - # Interleave the two produced oang solutions to simplify later - # processing - oangs = np.empty((len(angs_0)*2, 3), dtype=angs_0.dtype) - oangs[0::2] = angs_0 - oangs[1::2] = angs_1 - - # Map all of the angles at once - oangs[:, 1] = xfcapi.mapAngle(oangs[:, 1]) - oangs[:, 2] = xfcapi.mapAngle(oangs[:, 2], omePeriod) - - # generate array of symHKLs indices - symHKLs_ix = symHKLs_ix*2 - hkl_idx = np.empty((symHKLs_ix[-1],), dtype=int) - start = symHKLs_ix[0] - idx = 0 - for end in symHKLs_ix[1:]: - hkl_idx[start:end] = idx - start = end - idx += 1 - - return oangs, hkl_idx - - def _filter_angs(angs_0, angs_1, symHKLs_ix, etaEdges, valid_eta_spans, - omeEdges, valid_ome_spans, omePeriod): - """Part of paintGridThis. - - Bakes data in a way that invalid (nan or out-of-bound) is discarded. - - Parameters - ---------- - angs_0 : TYPE - DESCRIPTION. - angs_1 : TYPE - DESCRIPTION. - symHKLs_ix : TYPE - DESCRIPTION. - etaEdges : TYPE - DESCRIPTION. - valid_eta_spans : TYPE - DESCRIPTION. - omeEdges : TYPE - DESCRIPTION. - valid_ome_spans : TYPE - DESCRIPTION. - omePeriod : TYPE - DESCRIPTION. - - Returns - ------- - hkl_idx : ndarray - associate hkl indices. - eta_idx : ndarray - associated eta indices of predicted. - ome_idx : ndarray - associated ome indices of predicted. - - """ - oangs, hkl_idx = _normalize_angs_hkls( - angs_0, angs_1, omePeriod, symHKLs_ix) - # using "right" side to make sure we always get an index *past* the - # value if it happens to be equal; i.e. we search the index of the - # first value that is "greater than" rather than "greater or equal" - culled_eta_indices = np.searchsorted(etaEdges, oangs[:, 1], - side='right') - culled_ome_indices = np.searchsorted(omeEdges, oangs[:, 2], - side='right') - # this check is equivalent to validateAngleRanges: - # - # The spans contains an ordered sucession of start and end angles which - # form the valid angle spans. So knowing if an angle is valid is - # equivalent to finding the insertion point in the spans array and - # checking if the resulting insertion index is odd or even. An odd - # value means that it falls between a start and a end point of the - # "valid span", meaning it is a hit. An even value will result in - # either being out of the range (0 or the last index, as length is even - # by construction) or that it falls between a "end" point from one span - # and the "start" point of the next one. 
- valid_eta = np.searchsorted(valid_eta_spans, oangs[:, 1], side='right') - valid_ome = np.searchsorted(valid_ome_spans, oangs[:, 2], side='right') - # fast odd/even check - valid_eta = valid_eta & 1 - valid_ome = valid_ome & 1 - # Create a mask of the good ones - valid = ~np.isnan(oangs[:, 0]) # tth not NaN - valid = np.logical_and(valid, valid_eta) - valid = np.logical_and(valid, valid_ome) - valid = np.logical_and(valid, culled_eta_indices > 0) - valid = np.logical_and(valid, culled_eta_indices < len(etaEdges)) - valid = np.logical_and(valid, culled_ome_indices > 0) - valid = np.logical_and(valid, culled_ome_indices < len(omeEdges)) - - hkl_idx = hkl_idx[valid] - eta_idx = culled_eta_indices[valid] - 1 - ome_idx = culled_ome_indices[valid] - 1 - - return hkl_idx, eta_idx, ome_idx - - def _count_hits(eta_idx, ome_idx, hkl_idx, etaOmeMaps, - etaIndices, omeIndices, dpix_eta, dpix_ome, threshold): - """ - Part of paintGridThis. - - for every eta, ome, hkl check if there is a sample that surpasses the - threshold in the eta ome map. - """ - predicted = len(hkl_idx) - hits = 0 - - for curr_ang in range(predicted): - culledEtaIdx = eta_idx[curr_ang] - culledOmeIdx = ome_idx[curr_ang] - iHKL = hkl_idx[curr_ang] - # got a result - eta = etaIndices[culledEtaIdx] - ome = omeIndices[culledOmeIdx] - isHit = _check_dilated(eta, ome, dpix_eta, dpix_ome, - etaOmeMaps[iHKL], threshold[iHKL]) - - if isHit > 0: - hits += 1 - if isHit == -1: - predicted -= 1 - - return hits, predicted + li = mi + 1 + + return li + + +@numba.njit(nogil=True, cache=True) +def _angle_is_hit( + ang, + eta_offset, + ome_offset, + hkl, + valid_eta_spans, + valid_ome_spans, + etaEdges, + omeEdges, + etaOmeMaps, + etaIndices, + omeIndices, + dpix_eta, + dpix_ome, + threshold, +): + """Perform work on one of the angles. + + This includes: + + - filtering nan values + + - filtering out angles not in the specified spans + + - checking that the discretized angle fits into the sensor range (maybe + this could be merged with the previous test somehow, for extra speed) + + - actual check for a hit, using dilation for the tolerance. + + Note the function returns both, if it was a hit and if it passed the + filtering, as we'll want to discard the filtered values when computing + the hit percentage. + + CAVEAT: added map-based nan filtering to _check_dilated; this may not + be the best option. Perhaps filter here? + + """ + tth, eta, ome = ang + + if np.isnan(tth): + return 0, 0 + + eta = _map_angle(eta, eta_offset) + if _find_in_range(eta, valid_eta_spans) & 1 == 0: + # index is even: out of valid eta spans + return 0, 0 + + ome = _map_angle(ome, ome_offset) + if _find_in_range(ome, valid_ome_spans) & 1 == 0: + # index is even: out of valid ome spans + return 0, 0 + + # discretize the angles + eta_idx = _find_in_range(eta, etaEdges) - 1 + if eta_idx < 0: + # out of range + return 0, 0 + + ome_idx = _find_in_range(ome, omeEdges) - 1 + if ome_idx < 0: + # out of range + return 0, 0 + + eta = etaIndices[eta_idx] + ome = omeIndices[ome_idx] + isHit = _check_dilated( + eta, ome, dpix_eta, dpix_ome, etaOmeMaps[hkl], threshold[hkl] + ) + if isHit == -1: + return 0, 0 + else: + return isHit, 1 + + +@numba.njit(nogil=True, cache=True) +def _filter_and_count_hits( + angs_0, + angs_1, + symHKLs_ix, + etaEdges, + valid_eta_spans, + valid_ome_spans, + omeEdges, + omePeriod, + etaOmeMaps, + etaIndices, + omeIndices, + dpix_eta, + dpix_ome, + threshold, +): + """Accumulate completeness scores. 
+
+    assumes:
+    we want etas in -pi -> pi range
+    we want omes in ome_offset -> ome_offset + 2*pi range
+
+    Instead of creating an array with the angles of angs_0 and angs_1
+    interleaved, in this numba version the calls are performed on both
+    arrays, getting the angles from angs_0 and angs_1. This is done to
+    reuse the hkl computation. This may not be that important, though.
+
+    """
+    eta_offset = -np.pi
+    ome_offset = np.min(omePeriod)
+    hits = 0
+    total = 0
+    curr_hkl_idx = 0
+    end_curr = symHKLs_ix[1]
+    count = len(angs_0)
+
+    for i in range(count):
+        if i >= end_curr:
+            curr_hkl_idx += 1
+            end_curr = symHKLs_ix[curr_hkl_idx + 1]
+
+        # first solution
+        hit, not_filtered = _angle_is_hit(
+            angs_0[i],
+            eta_offset,
+            ome_offset,
+            curr_hkl_idx,
+            valid_eta_spans,
+            valid_ome_spans,
+            etaEdges,
+            omeEdges,
+            etaOmeMaps,
+            etaIndices,
+            omeIndices,
+            dpix_eta,
+            dpix_ome,
+            threshold,
+        )
+        hits += hit
+        total += not_filtered
+
+        # second solution
+        hit, not_filtered = _angle_is_hit(
+            angs_1[i],
+            eta_offset,
+            ome_offset,
+            curr_hkl_idx,
+            valid_eta_spans,
+            valid_ome_spans,
+            etaEdges,
+            omeEdges,
+            etaOmeMaps,
+            etaIndices,
+            omeIndices,
+            dpix_eta,
+            dpix_ome,
+            threshold,
+        )
+        hits += hit
+        total += not_filtered
+
+    return float(hits) / float(total) if total != 0 else 0.0
+
+
+@numba.njit(nogil=True, cache=True)
+def _map_angle(angle, offset):
+    """Numba-friendly equivalent to xf.mapAngle."""
+    return np.mod(angle - offset, 2 * np.pi) + offset
diff --git a/hexrd/instrument/detector.py b/hexrd/instrument/detector.py
index cec8c186d..239e9d954 100644
--- a/hexrd/instrument/detector.py
+++ b/hexrd/instrument/detector.py
@@ -3,6 +3,7 @@
 import os
 
 import numpy as np
+import numba
 
 from hexrd import constants as ct
 from hexrd import distortion as distortion_pkg
@@ -24,17 +25,12 @@
 from hexrd.utils.decorators import memoize
 from hexrd.gridutil import cellIndices
 
-if ct.USE_NUMBA:
-    import numba
 
 distortion_registry = distortion_pkg.Registry()
 
 max_workers_DFLT = max(1, os.cpu_count() - 1)
 
-panel_calibration_flags_DFLT = np.array(
-    [1, 1, 1, 1, 1, 1],
-    dtype=bool
-)
+panel_calibration_flags_DFLT = np.array([1, 1, 1, 1, 1, 1], dtype=bool)
 
 beam_energy_DFLT = 65.351
 
@@ -51,17 +47,24 @@ class Detector:
     common to planar and cylindrical detectors. This class
     will be inherited by both those classes.
     """
+
     __pixelPitchUnit = 'mm'
 
     # Abstract methods that must be redefined in derived classes
     @property
     @abstractmethod
     def detector_type(self):
-        pass
+        raise NotImplementedError
 
     @abstractmethod
-    def cart_to_angles(self, xy_data, rmat_s=None, tvec_s=None,
-                       tvec_c=None, apply_distortion=False):
+    def cart_to_angles(
+        self,
+        xy_data,
+        rmat_s=None,
+        tvec_s=None,
+        tvec_c=None,
+        apply_distortion=False,
+    ):
         """
         Transform cartesian coordinates to angular.
 
@@ -91,13 +94,18 @@ def cart_to_angles(self, xy_data, rmat_s=None, tvec_s=None,
             DESCRIPTION.
 
         """
-        pass
+        raise NotImplementedError
 
     @abstractmethod
-    def angles_to_cart(self, tth_eta,
-                       rmat_s=None, tvec_s=None,
-                       rmat_c=None, tvec_c=None,
-                       apply_distortion=False):
+    def angles_to_cart(
+        self,
+        tth_eta,
+        rmat_s=None,
+        tvec_s=None,
+        rmat_c=None,
+        tvec_c=None,
+        apply_distortion=False,
+    ):
         """
         Transform angular coordinates to cartesian.
 
@@ -126,24 +134,24 @@ def angles_to_cart(self, tth_eta,
             The (n, 2) array on the n input coordinates in the .
""" - pass + raise NotImplementedError @abstractmethod def cart_to_dvecs(self, xy_data): """Convert cartesian coordinates to dvectors""" - pass + raise NotImplementedError @abstractmethod def pixel_angles(self, origin=ct.zeros_3): - pass + raise NotImplementedError @abstractmethod def pixel_tth_gradient(self, origin=ct.zeros_3): - pass + raise NotImplementedError @abstractmethod def pixel_eta_gradient(self, origin=ct.zeros_3): - pass + raise NotImplementedError @property @abstractmethod @@ -152,7 +160,8 @@ def beam_position(self): returns the coordinates of the beam in the cartesian detector frame {Xd, Yd, Zd}. NaNs if no intersection. """ - pass + raise NotImplementedError + @property def extra_config_kwargs(self): @@ -160,21 +169,25 @@ def extra_config_kwargs(self): # End of abstract methods - def __init__(self, - rows=2048, cols=2048, - pixel_size=(0.2, 0.2), - tvec=np.r_[0., 0., -1000.], - tilt=ct.zeros_3, - name='default', - bvec=ct.beam_vec, - xrs_dist=None, - evec=ct.eta_vec, - saturation_level=None, - panel_buffer=None, - tth_distortion=None, - roi=None, group=None, - distortion=None, - max_workers=max_workers_DFLT): + def __init__( + self, + rows=2048, + cols=2048, + pixel_size=(0.2, 0.2), + tvec=np.r_[0.0, 0.0, -1000.0], + tilt=ct.zeros_3, + name='default', + bvec=ct.beam_vec, + xrs_dist=None, + evec=ct.eta_vec, + saturation_level=None, + panel_buffer=None, + tth_distortion=None, + roi=None, + group=None, + distortion=None, + max_workers=max_workers_DFLT, + ): """ Instantiate a PlanarDetector object. @@ -231,10 +244,11 @@ def __init__(self, if roi is None: self._roi = roi else: - assert len(roi) == 2, \ - "roi is set via (start_row, start_col)" - self._roi = ((roi[0], roi[0] + self._rows), - (roi[1], roi[1] + self._cols)) + assert len(roi) == 2, "roi is set via (start_row, start_col)" + self._roi = ( + (roi[0], roi[0] + self._rows), + (roi[1], roi[1] + self._cols), + ) self._tvec = np.array(tvec).flatten() self._tilt = np.array(tilt).flatten() @@ -260,12 +274,11 @@ def __init__(self, if self._distortion is not None: dparams = self._distortion.params self._calibration_parameters = np.hstack( - [self._tilt, self._tvec, dparams] - ) + [self._tilt, self._tvec, dparams] + ) self._calibration_flags = np.hstack( - [panel_calibration_flags_DFLT, - np.zeros(len(dparams), dtype=bool)] - ) + [panel_calibration_flags_DFLT, np.zeros(len(dparams), dtype=bool)] + ) # detector ID @property @@ -363,10 +376,13 @@ def roi(self, vertex_array): !!! 
vertex array must be (r0, c0) """ if vertex_array is not None: - assert len(vertex_array) == 2, \ - "roi is set via (start_row, start_col)" - self._roi = ((vertex_array[0], vertex_array[0] + self.rows), - (vertex_array[1], vertex_array[1] + self.cols)) + assert ( + len(vertex_array) == 2 + ), "roi is set via (start_row, start_col)" + self._roi = ( + (vertex_array[0], vertex_array[0] + self.rows), + (vertex_array[1], vertex_array[1] + self.cols), + ) @property def row_dim(self): @@ -378,7 +394,9 @@ def col_dim(self): @property def row_pixel_vec(self): - return self.pixel_size_row*(0.5*(self.rows-1)-np.arange(self.rows)) + return self.pixel_size_row * ( + 0.5 * (self.rows - 1) - np.arange(self.rows) + ) @property def row_edge_vec(self): @@ -386,7 +404,9 @@ def row_edge_vec(self): @property def col_pixel_vec(self): - return self.pixel_size_col*(np.arange(self.cols)-0.5*(self.cols-1)) + return self.pixel_size_col * ( + np.arange(self.cols) - 0.5 * (self.cols - 1) + ) @property def col_edge_vec(self): @@ -394,7 +414,7 @@ def col_edge_vec(self): @property def corner_ul(self): - return np.r_[-0.5 * self.col_dim, 0.5 * self.row_dim] + return np.r_[-0.5 * self.col_dim, 0.5 * self.row_dim] @property def corner_ll(self): @@ -406,7 +426,7 @@ def corner_lr(self): @property def corner_ur(self): - return np.r_[0.5 * self.col_dim, 0.5 * self.row_dim] + return np.r_[0.5 * self.col_dim, 0.5 * self.row_dim] @property def shape(self): @@ -438,8 +458,9 @@ def bvec(self): @bvec.setter def bvec(self, x): x = np.array(x).flatten() - assert len(x) == 3 and sum(x*x) > 1-ct.sqrt_epsf, \ - 'input must have length = 3 and have unit magnitude' + assert ( + len(x) == 3 and sum(x * x) > 1 - ct.sqrt_epsf + ), 'input must have length = 3 and have unit magnitude' self._bvec = x @property @@ -448,8 +469,9 @@ def xrs_dist(self): @xrs_dist.setter def xrs_dist(self, x): - assert x is None or np.isscalar(x), \ - f"'source_distance' must be None or scalar; you input '{x}'" + assert x is None or np.isscalar( + x + ), f"'source_distance' must be None or scalar; you input '{x}'" self._xrs_dist = x @property @@ -459,8 +481,9 @@ def evec(self): @evec.setter def evec(self, x): x = np.array(x).flatten() - assert len(x) == 3 and sum(x*x) > 1-ct.sqrt_epsf, \ - 'input must have length = 3 and have unit magnitude' + assert ( + len(x) == 3 and sum(x * x) > 1 - ct.sqrt_epsf + ), 'input must have length = 3 and have unit magnitude' self._evec = x @property @@ -473,8 +496,7 @@ def distortion(self, x): check_arg = np.zeros(len(distortion_registry), dtype=bool) for i, dcls in enumerate(distortion_registry.values()): check_arg[i] = isinstance(x, dcls) - assert np.any(check_arg), \ - 'input distortion is not in registry!' + assert np.any(check_arg), 'input distortion is not in registry!' 
self._distortion = x @property @@ -489,8 +511,8 @@ def normal(self): @property def pixel_coords(self): pix_i, pix_j = np.meshgrid( - self.row_pixel_vec, self.col_pixel_vec, - indexing='ij') + self.row_pixel_vec, self.col_pixel_vec, indexing='ij' + ) return pix_i, pix_j @property @@ -505,8 +527,8 @@ def calibration_parameters(self): if self.distortion is not None: dparams = self.distortion.params self._calibration_parameters = np.hstack( - [self.tilt, self.tvec, dparams] - ) + [self.tilt, self.tvec, dparams] + ) return self._calibration_parameters @property @@ -572,14 +594,18 @@ def polarization_factor(self, f_hor, f_vert, unpolarized=False): """ s = f_hor + f_vert if np.abs(s - 1) > ct.sqrt_epsf: - msg = ("sum of fraction of " - "horizontal and vertical polarizations " - "must be equal to 1.") + msg = ( + "sum of fraction of " + "horizontal and vertical polarizations " + "must be equal to 1." + ) raise RuntimeError(msg) if f_hor < 0 or f_vert < 0: - msg = ("fraction of polarization in horizontal " - "or vertical directions can't be negative.") + msg = ( + "fraction of polarization in horizontal " + "or vertical directions can't be negative." + ) raise RuntimeError(msg) tth, eta = self.pixel_angles() @@ -615,9 +641,16 @@ def lorentz_factor(self): tth, eta = self.pixel_angles() return _lorentz_factor(tth) - def config_dict(self, chi=0, tvec=ct.zeros_3, - beam_energy=beam_energy_DFLT, beam_vector=ct.beam_vec, - sat_level=None, panel_buffer=None, style='yaml'): + def config_dict( + self, + chi=0, + tvec=ct.zeros_3, + beam_energy=beam_energy_DFLT, + beam_vector=ct.beam_vec, + sat_level=None, + panel_buffer=None, + style='yaml', + ): """ Return a dictionary of detector parameters. @@ -645,8 +678,9 @@ def config_dict(self, chi=0, tvec=ct.zeros_3, DESCRIPTION. """ - assert style.lower() in ['yaml', 'hdf5'], \ + assert style.lower() in ['yaml', 'hdf5'], ( "style must be either 'yaml', or 'hdf5'; you gave '%s'" % style + ) config_dict = {} @@ -658,8 +692,11 @@ def config_dict(self, chi=0, tvec=ct.zeros_3, # assign local vars; listify if necessary tilt = self.tilt translation = self.tvec - roi = None if self.roi is None \ + roi = ( + None + if self.roi is None else np.array([self.roi[0][0], self.roi[1][0]]).flatten() + ) if style.lower() == 'yaml': tilt = tilt.tolist() translation = translation.tolist() @@ -675,9 +712,8 @@ def config_dict(self, chi=0, tvec=ct.zeros_3, pixels=dict( rows=int(self.rows), columns=int(self.cols), - size=[float(self.pixel_size_row), - float(self.pixel_size_col)], - ) + size=[float(self.pixel_size_row), float(self.pixel_size_col)], + ), ) if roi is not None: @@ -694,8 +730,7 @@ def config_dict(self, chi=0, tvec=ct.zeros_3, if style.lower() == 'yaml': dparams = dparams.tolist() dist_d = dict( - function_name=self.distortion.maptype, - parameters=dparams + function_name=self.distortion.maptype, parameters=dparams ) det_dict['distortion'] = dist_d @@ -711,8 +746,7 @@ def config_dict(self, chi=0, tvec=ct.zeros_3, # !!! now we have to do some style-dependent munging of panel_buffer if isinstance(panel_buffer, np.ndarray): if panel_buffer.ndim == 1: - assert len(panel_buffer) == 2, \ - "length of 1-d buffer must be 2" + assert len(panel_buffer) == 2, "length of 1-d buffer must be 2" # if here is a 2-element array if style.lower() == 'yaml': panel_buffer = panel_buffer.tolist() @@ -721,7 +755,7 @@ def config_dict(self, chi=0, tvec=ct.zeros_3, # !!! 
can't practically write array-like buffers to YAML # so forced to clobber print("clobbering panel buffer array in yaml-ready output") - panel_buffer = [0., 0.] + panel_buffer = [0.0, 0.0] else: raise RuntimeError( "panel buffer ndim must be 1 or 2; you specified %d" @@ -730,11 +764,10 @@ def config_dict(self, chi=0, tvec=ct.zeros_3, elif panel_buffer is None: # still None on self # !!! this gets handled by unwrap_dict_to_h5 now - ''' - if style.lower() == 'hdf5': - # !!! can't write None to hdf5; substitute with zeros - panel_buffer = np.r_[0., 0.] - ''' + + # if style.lower() == 'hdf5': + # # !!! can't write None to hdf5; substitute with zeros + # panel_buffer = np.r_[0., 0.] pass det_dict['buffer'] = panel_buffer @@ -743,10 +776,7 @@ def config_dict(self, chi=0, tvec=ct.zeros_3, # ===================================================================== # SAMPLE STAGE PARAMETERS # ===================================================================== - stage_dict = dict( - chi=chi, - translation=tvec - ) + stage_dict = dict(chi=chi, translation=tvec) # ===================================================================== # BEAM PARAMETERS @@ -760,10 +790,7 @@ def config_dict(self, chi=0, tvec=ct.zeros_3, # polar_angle=pola # ) # ) - beam_dict = dict( - energy=beam_energy, - vector=beam_vector - ) + beam_dict = dict(energy=beam_energy, vector=beam_vector) config_dict['detector'] = det_dict config_dict['oscillation_stage'] = stage_dict @@ -822,10 +849,10 @@ def pixelToCart(self, ij_det): """ ij_det = np.atleast_2d(ij_det) - x = (ij_det[:, 1] + 0.5)*self.pixel_size_col\ - + self.corner_ll[0] - y = (self.rows - ij_det[:, 0] - 0.5)*self.pixel_size_row\ - + self.corner_ll[1] + x = (ij_det[:, 1] + 0.5) * self.pixel_size_col + self.corner_ll[0] + y = ( + self.rows - ij_det[:, 0] - 0.5 + ) * self.pixel_size_row + self.corner_ll[1] return np.vstack([x, y]).T def angularPixelSize(self, xy, rMat_s=None, tVec_s=None, tVec_c=None): @@ -863,11 +890,17 @@ def angularPixelSize(self, xy, rMat_s=None, tVec_s=None, tVec_c=None): ''' # call xrdutil function ang_ps = xrdutil.angularPixelSize( - xy, (self.pixel_size_row, self.pixel_size_col), - self.rmat, rMat_s, - self.tvec, tVec_s, tVec_c, + xy, + (self.pixel_size_row, self.pixel_size_col), + self.rmat, + rMat_s, + self.tvec, + tVec_s, + tVec_c, distortion=self.distortion, - beamVec=self.bvec, etaVec=self.evec) + beamVec=self.bvec, + etaVec=self.evec, + ) return ang_ps def clip_to_panel(self, xy, buffer_edges=True): @@ -892,8 +925,8 @@ def clip_to_panel(self, xy, buffer_edges=True): on_panel = np.logical_and(on_panel_rows, on_panel_cols) else: ''' - xlim = 0.5*self.col_dim - ylim = 0.5*self.row_dim + xlim = 0.5 * self.col_dim + ylim = 0.5 * self.row_dim if buffer_edges and self.panel_buffer is not None: if self.panel_buffer.ndim == 2: pix = self.cartToPixel(xy, pixels=True) @@ -905,7 +938,9 @@ def clip_to_panel(self, xy, buffer_edges=True): on_panel = np.full(pix.shape[0], False) valid_pix = pix[~idx, :] - on_panel[~idx] = self.panel_buffer[valid_pix[:, 0], valid_pix[:, 1]] + on_panel[~idx] = self.panel_buffer[ + valid_pix[:, 0], valid_pix[:, 1] + ] else: xlim -= self.panel_buffer[0] ylim -= self.panel_buffer[1] @@ -917,12 +952,8 @@ def clip_to_panel(self, xy, buffer_edges=True): ) on_panel = np.logical_and(on_panel_x, on_panel_y) elif not buffer_edges or self.panel_buffer is None: - on_panel_x = np.logical_and( - xy[:, 0] >= -xlim, xy[:, 0] <= xlim - ) - on_panel_y = np.logical_and( - xy[:, 1] >= -ylim, xy[:, 1] <= ylim - ) + on_panel_x = np.logical_and(xy[:, 0] 
>= -xlim, xy[:, 0] <= xlim) + on_panel_y = np.logical_and(xy[:, 1] >= -ylim, xy[:, 1] <= ylim) on_panel = np.logical_and(on_panel_x, on_panel_y) return xy[on_panel, :], on_panel @@ -933,13 +964,16 @@ def interpolate_nearest(self, xy, img, pad_with_nans=True): """ is_2d = img.ndim == 2 right_shape = img.shape[0] == self.rows and img.shape[1] == self.cols - assert is_2d and right_shape,\ - "input image must be 2-d with shape (%d, %d)"\ - % (self.rows, self.cols) + assert ( + is_2d and right_shape + ), "input image must be 2-d with shape (%d, %d)" % ( + self.rows, + self.cols, + ) # initialize output with nans if pad_with_nans: - int_xy = np.nan*np.ones(len(xy)) + int_xy = np.nan * np.ones(len(xy)) else: int_xy = np.zeros(len(xy)) @@ -955,7 +989,6 @@ def interpolate_nearest(self, xy, img, pad_with_nans=True): int_xy[on_panel] = int_vals return int_xy - def interpolate_bilinear(self, xy, img, pad_with_nans=True): """ Interpolate an image array at the specified cartesian points. @@ -984,13 +1017,16 @@ def interpolate_bilinear(self, xy, img, pad_with_nans=True): is_2d = img.ndim == 2 right_shape = img.shape[0] == self.rows and img.shape[1] == self.cols - assert is_2d and right_shape,\ - "input image must be 2-d with shape (%d, %d)"\ - % (self.rows, self.cols) + assert ( + is_2d and right_shape + ), "input image must be 2-d with shape (%d, %d)" % ( + self.rows, + self.cols, + ) # initialize output with nans if pad_with_nans: - int_xy = np.nan*np.ones(len(xy)) + int_xy = np.nan * np.ones(len(xy)) else: int_xy = np.zeros(len(xy)) @@ -1017,25 +1053,34 @@ def interpolate_bilinear(self, xy, img, pad_with_nans=True): j_ceil_img = _fix_indices(j_ceil, 0, self.cols - 1) # first interpolate at top/bottom rows - row_floor_int = \ - (j_ceil - ij_frac[:, 1])*img[i_floor_img, j_floor_img] \ - + (ij_frac[:, 1] - j_floor)*img[i_floor_img, j_ceil_img] - row_ceil_int = \ - (j_ceil - ij_frac[:, 1])*img[i_ceil_img, j_floor_img] \ - + (ij_frac[:, 1] - j_floor)*img[i_ceil_img, j_ceil_img] + row_floor_int = (j_ceil - ij_frac[:, 1]) * img[ + i_floor_img, j_floor_img + ] + (ij_frac[:, 1] - j_floor) * img[i_floor_img, j_ceil_img] + row_ceil_int = (j_ceil - ij_frac[:, 1]) * img[ + i_ceil_img, j_floor_img + ] + (ij_frac[:, 1] - j_floor) * img[i_ceil_img, j_ceil_img] # next interpolate across cols - int_vals = \ - (i_ceil - ij_frac[:, 0])*row_floor_int \ - + (ij_frac[:, 0] - i_floor)*row_ceil_int + int_vals = (i_ceil - ij_frac[:, 0]) * row_floor_int + ( + ij_frac[:, 0] - i_floor + ) * row_ceil_int int_xy[on_panel] = int_vals return int_xy def make_powder_rings( - self, pd, merge_hkls=False, delta_tth=None, - delta_eta=10., eta_period=None, eta_list=None, - rmat_s=ct.identity_3x3, tvec_s=ct.zeros_3, - tvec_c=ct.zeros_3, full_output=False, tth_distortion=None): + self, + pd, + merge_hkls=False, + delta_tth=None, + delta_eta=10.0, + eta_period=None, + eta_list=None, + rmat_s=ct.identity_3x3, + tvec_s=ct.zeros_3, + tvec_c=ct.zeros_3, + full_output=False, + tth_distortion=None, + ): """ Generate points on Debye_Scherrer rings over the detector. 
@@ -1080,8 +1125,9 @@ def make_powder_rings(
         """
         if tth_distortion is not None:
             tnorms = rowNorm(np.vstack([tvec_s, tvec_c]))
-            assert np.all(tnorms) < ct.sqrt_epsf, \
-                "If using distrotion function, translations must be zero"
+            assert (
+                np.all(tnorms) < ct.sqrt_epsf
+            ), "If using distortion function, translations must be zero"
 
         # in case you want to give it tth angles directly
         if isinstance(pd, PlaneData):
@@ -1110,28 +1156,50 @@
             tth = pd.getTTh()
             tth_pm = tth_ranges - np.tile(tth, (2, 1)).T
             sector_vertices = np.vstack(
-                [[i[0], -del_eta,
-                  i[0], del_eta,
-                  i[1], del_eta,
-                  i[1], -del_eta,
-                  0.0, 0.0]
-                 for i in tth_pm])
+                [
+                    [
+                        i[0],
+                        -del_eta,
+                        i[0],
+                        del_eta,
+                        i[1],
+                        del_eta,
+                        i[1],
+                        -del_eta,
+                        0.0,
+                        0.0,
+                    ]
+                    for i in tth_pm
+                ]
+            )
         else:
             # Okay, we have a array-like tth specification
             tth = np.array(pd).flatten()
             if delta_tth is None:
                 raise RuntimeError(
                     "If supplying a 2theta list as first arg, "
-                    + "must supply a delta_tth")
-            tth_pm = 0.5*delta_tth*np.r_[-1., 1.]
+                    + "must supply a delta_tth"
+                )
+            tth_pm = 0.5 * delta_tth * np.r_[-1.0, 1.0]
             tth_ranges = np.radians([i + tth_pm for i in tth])  # !!! units
             sector_vertices = np.tile(
-                0.5*np.radians([-delta_tth, -delta_eta,
-                                -delta_tth, delta_eta,
-                                delta_tth, delta_eta,
-                                delta_tth, -delta_eta,
-                                0.0, 0.0]), (len(tth), 1)
-            )
+                0.5
+                * np.radians(
+                    [
+                        -delta_tth,
+                        -delta_eta,
+                        -delta_tth,
+                        delta_eta,
+                        delta_tth,
+                        delta_eta,
+                        delta_tth,
+                        -delta_eta,
+                        0.0,
+                        0.0,
+                    ]
+                ),
+                (len(tth), 1),
+            )
             # !! conversions, meh...
             tth = np.radians(tth)
             del_eta = np.radians(delta_eta)
@@ -1141,13 +1209,12 @@ def make_powder_rings(
             eta_period = (-np.pi, np.pi)
 
         if eta_list is None:
-            neta = int(360./float(delta_eta))
+            neta = int(360.0 / float(delta_eta))
             # this is the vector of ETA EDGES
             eta_edges = mapAngle(
-                np.radians(
-                    delta_eta*np.linspace(0., neta, num=neta + 1)
-                ) + eta_period[0],
-                eta_period
+                np.radians(delta_eta * np.linspace(0.0, neta, num=neta + 1))
+                + eta_period[0],
+                eta_period,
             )
 
             # get eta bin centers from edges
             """
            eta_centers = np.average(
                np.vstack([eta[:-1], eta[1:]),
                axis=0)
             """
             # !!! should be safe as eta_edges are monotonic
-            eta_centers = eta_edges[:-1] + 0.5*del_eta
+            eta_centers = eta_edges[:-1] + 0.5 * del_eta
         else:
             eta_centers = np.radians(eta_list).flatten()
             neta = len(eta_centers)
             eta_edges = (
-                np.tile(eta_centers, (2, 1)) +
-                np.tile(0.5*del_eta*np.r_[-1, 1], (neta, 1)).T
+                np.tile(eta_centers, (2, 1))
+                + np.tile(0.5 * del_eta * np.r_[-1, 1], (neta, 1)).T
             ).T.flatten()
 
         # get chi and ome from rmat_s
@@ -1174,8 +1241,10 @@
         ome = np.arctan2(rmat_s[0, 2], rmat_s[0, 0])
 
         # make list of angle tuples
-        angs = [np.vstack([i*np.ones(neta), eta_centers, ome*np.ones(neta)])
-                for i in tth]
+        angs = [
+            np.vstack([i * np.ones(neta), eta_centers, ome * np.ones(neta)])
+            for i in tth
+        ]
 
         # need xy coords and pixel sizes
         valid_ang = []
@@ -1192,15 +1261,18 @@
             patch_vertices = (
                 np.tile(these_angs[:, :2], (1, npp))
                 + np.tile(sector_vertices[i_ring], (neta, 1))
-            ).reshape(npp*neta, 2)
+            ).reshape(npp * neta, 2)
 
             # find vertices that all fall on the panel
             # !!!
not API ambiguity regarding rmat_s above all_xy = self.angles_to_cart( patch_vertices, - rmat_s=rmat_s, tvec_s=tvec_s, - rmat_c=None, tvec_c=tvec_c, - apply_distortion=True) + rmat_s=rmat_s, + tvec_s=tvec_s, + rmat_c=None, + tvec_c=tvec_c, + apply_distortion=True, + ) _, on_panel = self.clip_to_panel(all_xy) @@ -1215,10 +1287,11 @@ def make_powder_rings( if tth_distortion is not None: patch_valid_angs = tth_distortion.apply( self.angles_to_cart(these_angs[patch_is_on, :2]), - return_nominal=True + return_nominal=True, + ) + patch_valid_xys = self.angles_to_cart( + patch_valid_angs, apply_distortion=True ) - patch_valid_xys = self.angles_to_cart(patch_valid_angs, - apply_distortion=True) else: patch_valid_angs = these_angs[patch_is_on, :2] patch_valid_xys = patch_xys[:, -1, :].squeeze() @@ -1227,7 +1300,6 @@ def make_powder_rings( valid_ang.append(patch_valid_angs) valid_xy.append(patch_valid_xys) map_indices.append(patch_is_on) - pass # ??? is this option necessary? if full_output: return valid_ang, valid_xy, tth_ranges, map_indices, eta_edges @@ -1275,20 +1347,30 @@ def map_to_plane(self, pts, rmat, tvec): pts_lab = np.dot(self.rmat, pts_det.T) + tvec_d_lab # scaling along pts vectors to hit map plane - u = np.dot(nvec_map_lab.T, tvec_map_lab) \ - / np.dot(nvec_map_lab.T, pts_lab) + u = np.dot(nvec_map_lab.T, tvec_map_lab) / np.dot( + nvec_map_lab.T, pts_lab + ) # pts on map plane, in LAB FRAME pts_map_lab = np.tile(u, (3, 1)) * pts_lab return np.dot(rmat.T, pts_map_lab - tvec_map_lab)[:2, :].T - def simulate_rotation_series(self, plane_data, grain_param_list, - eta_ranges=[(-np.pi, np.pi), ], - ome_ranges=[(-np.pi, np.pi), ], - ome_period=(-np.pi, np.pi), - chi=0., tVec_s=ct.zeros_3, - wavelength=None): + def simulate_rotation_series( + self, + plane_data, + grain_param_list, + eta_ranges=[ + (-np.pi, np.pi), + ], + ome_ranges=[ + (-np.pi, np.pi), + ], + ome_period=(-np.pi, np.pi), + chi=0.0, + tVec_s=ct.zeros_3, + wavelength=None, + ): """ Simulate a monochromatic rotation series for a list of grains. @@ -1336,8 +1418,9 @@ def simulate_rotation_series(self, plane_data, grain_param_list, else: if plane_data.wavelength != wavelength: plane_data.wavelength = ct.keVToAngstrom(wavelength) - assert not np.any(np.isnan(plane_data.getTTh())),\ - "plane data exclusions incompatible with wavelength" + assert not np.any( + np.isnan(plane_data.getTTh()) + ), "plane data exclusions incompatible with wavelength" # vstacked G-vector id, h, k, l full_hkls = xrdutil._fetch_hkls_from_planedata(plane_data) @@ -1359,25 +1442,33 @@ def simulate_rotation_series(self, plane_data, grain_param_list, # for each omega solution angList = np.vstack( oscillAnglesOfHKLs( - full_hkls[:, 1:], chi, - rMat_c, bMat, wavelength, + full_hkls[:, 1:], + chi, + rMat_c, + bMat, + wavelength, vInv=vInv_s, - ) ) + ) # filter by eta and omega ranges # ??? get eta range from detector? 
allAngs, allHKLs = xrdutil._filter_hkls_eta_ome( full_hkls, angList, eta_ranges, ome_ranges - ) + ) allAngs[:, 2] = mapAngle(allAngs[:, 2], ome_period) # find points that fall on the panel det_xy, rMat_s, on_plane = xrdutil._project_on_detector_plane( allAngs, - self.rmat, rMat_c, chi, - self.tvec, tVec_c, tVec_s, - self.distortion) + self.rmat, + rMat_c, + chi, + self.tvec, + tVec_c, + tVec_s, + self.distortion, + ) xys_p, on_panel = self.clip_to_panel(det_xy) valid_xys.append(xys_p) @@ -1399,13 +1490,17 @@ def simulate_rotation_series(self, plane_data, grain_param_list, ang_pixel_size.append(self.angularPixelSize(xys_p)) return valid_ids, valid_hkls, valid_angs, valid_xys, ang_pixel_size - def simulate_laue_pattern(self, crystal_data, - minEnergy=5., maxEnergy=35., - rmat_s=None, tvec_s=None, - grain_params=None, - beam_vec=None): - """ - """ + def simulate_laue_pattern( + self, + crystal_data, + minEnergy=5.0, + maxEnergy=35.0, + rmat_s=None, + tvec_s=None, + grain_params=None, + beam_vec=None, + ): + """ """ if isinstance(crystal_data, PlaneData): plane_data = crystal_data @@ -1437,8 +1532,9 @@ def simulate_laue_pattern(self, crystal_data, # TODO: allow for spectrum parsing multipleEnergyRanges = False if hasattr(maxEnergy, '__len__'): - assert len(maxEnergy) == len(minEnergy), \ - 'energy cutoff ranges must have the same length' + assert len(maxEnergy) == len( + minEnergy + ), 'energy cutoff ranges must have the same length' multipleEnergyRanges = True lmin = [] lmax = [] @@ -1473,11 +1569,11 @@ def simulate_laue_pattern(self, crystal_data, # ========================================================================= # pre-allocate output arrays - xy_det = np.nan*np.ones((n_grains, nhkls_tot, 2)) - hkls_in = np.nan*np.ones((n_grains, 3, nhkls_tot)) - angles = np.nan*np.ones((n_grains, nhkls_tot, 2)) - dspacing = np.nan*np.ones((n_grains, nhkls_tot)) - energy = np.nan*np.ones((n_grains, nhkls_tot)) + xy_det = np.nan * np.ones((n_grains, nhkls_tot, 2)) + hkls_in = np.nan * np.ones((n_grains, 3, nhkls_tot)) + angles = np.nan * np.ones((n_grains, nhkls_tot, 2)) + dspacing = np.nan * np.ones((n_grains, nhkls_tot)) + energy = np.nan * np.ones((n_grains, nhkls_tot)) for iG, gp in enumerate(grain_params): rmat_c = makeRotMatOfExpMap(gp[:3]) tvec_c = gp[3:6].reshape(3, 1) @@ -1488,10 +1584,16 @@ def simulate_laue_pattern(self, crystal_data, ghat_c_str = mutil.unitVector(np.dot(rmat_c.T, gvec_s_str)) # project - dpts = gvec_to_xy(ghat_c_str.T, - self.rmat, rmat_s, rmat_c, - self.tvec, tvec_s, tvec_c, - beam_vec=beam_vec) + dpts = gvec_to_xy( + ghat_c_str.T, + self.rmat, + rmat_s, + rmat_c, + self.tvec, + tvec_s, + tvec_c, + beam_vec=beam_vec, + ) # check intersections with detector plane canIntersect = ~np.isnan(dpts[:, 0]) @@ -1504,9 +1606,13 @@ def simulate_laue_pattern(self, crystal_data, # back to angles tth_eta, gvec_l = detectorXYToGvec( dpts, - self.rmat, rmat_s, - self.tvec, tvec_s, tvec_c, - beamVec=beam_vec) + self.rmat, + rmat_s, + self.tvec, + tvec_s, + tvec_c, + beamVec=beam_vec, + ) tth_eta = np.vstack(tth_eta).T # warp measured points @@ -1514,8 +1620,8 @@ def simulate_laue_pattern(self, crystal_data, dpts = self.distortion.apply_inverse(dpts) # plane spacings and energies - dsp = 1. 
/ rowNorm(gvec_s_str[:, canIntersect].T) - wlen = 2*dsp*np.sin(0.5*tth_eta[:, 0]) + dsp = 1.0 / rowNorm(gvec_s_str[:, canIntersect].T) + wlen = 2 * dsp * np.sin(0.5 * tth_eta[:, 0]) # clip to detector panel _, on_panel = self.clip_to_panel(dpts, buffer_edges=True) @@ -1524,13 +1630,11 @@ def simulate_laue_pattern(self, crystal_data, validEnergy = np.zeros(len(wlen), dtype=bool) for i in range(len(lmin)): in_energy_range = np.logical_and( - wlen >= lmin[i], - wlen <= lmax[i]) + wlen >= lmin[i], wlen <= lmax[i] + ) validEnergy = validEnergy | in_energy_range - pass else: validEnergy = np.logical_and(wlen >= lmin, wlen <= lmax) - pass # index for valid reflections keepers = np.where(np.logical_and(on_panel, validEnergy))[0] @@ -1541,8 +1645,6 @@ def simulate_laue_pattern(self, crystal_data, angles[iG][keepers, :] = tth_eta[keepers, :] dspacing[iG, keepers] = dsp[keepers] energy[iG, keepers] = ct.keVToAngstrom(wlen[keepers]) - pass # close conditional on valids - pass # close loop on grains return xy_det, hkls_in, angles, dspacing, energy @staticmethod @@ -1567,6 +1669,7 @@ def increase_memoization_sizes(funcs, min_size): # UTILITY METHODS # ============================================================================= + def _fix_indices(idx, lo, hi): nidx = np.array(idx) off_lo = nidx < lo @@ -1577,36 +1680,26 @@ def _fix_indices(idx, lo, hi): def _row_edge_vec(rows, pixel_size_row): - return pixel_size_row*(0.5*rows-np.arange(rows+1)) + return pixel_size_row * (0.5 * rows - np.arange(rows + 1)) def _col_edge_vec(cols, pixel_size_col): - return pixel_size_col*(np.arange(cols+1)-0.5*cols) + return pixel_size_col * (np.arange(cols + 1) - 0.5 * cols) # FIXME find a better place for this, and maybe include loop over pixels -if ct.USE_NUMBA: - @numba.njit(nogil=True, cache=True) - def _solid_angle_of_triangle(vtx_list): - norms = np.sqrt(np.sum(vtx_list*vtx_list, axis=1)) - norms_prod = norms[0] * norms[1] * norms[2] - scalar_triple_product = np.dot(vtx_list[0], - np.cross(vtx_list[2], vtx_list[1])) - denominator = norms_prod \ - + norms[0]*np.dot(vtx_list[1], vtx_list[2]) \ - + norms[1]*np.dot(vtx_list[2], vtx_list[0]) \ - + norms[2]*np.dot(vtx_list[0], vtx_list[1]) - - return 2.*np.arctan2(scalar_triple_product, denominator) -else: - def _solid_angle_of_triangle(vtx_list): - norms = rowNorm(vtx_list) - norms_prod = np.cumprod(norms)[-1] - scalar_triple_product = np.dot(vtx_list[0], - np.cross(vtx_list[2], vtx_list[1])) - denominator = norms_prod \ - + norms[0]*np.dot(vtx_list[1], vtx_list[2]) \ - + norms[1]*np.dot(vtx_list[2], vtx_list[0]) \ - + norms[2]*np.dot(vtx_list[0], vtx_list[1]) - - return 2.*np.arctan2(scalar_triple_product, denominator) +@numba.njit(nogil=True, cache=True) +def _solid_angle_of_triangle(vtx_list): + norms = np.sqrt(np.sum(vtx_list * vtx_list, axis=1)) + norms_prod = norms[0] * norms[1] * norms[2] + scalar_triple_product = np.dot( + vtx_list[0], np.cross(vtx_list[2], vtx_list[1]) + ) + denominator = ( + norms_prod + + norms[0] * np.dot(vtx_list[1], vtx_list[2]) + + norms[1] * np.dot(vtx_list[2], vtx_list[0]) + + norms[2] * np.dot(vtx_list[0], vtx_list[1]) + ) + + return 2.0 * np.arctan2(scalar_triple_product, denominator) diff --git a/hexrd/instrument/hedm_instrument.py b/hexrd/instrument/hedm_instrument.py index 5f080cec7..af2c106dd 100644 --- a/hexrd/instrument/hedm_instrument.py +++ b/hexrd/instrument/hedm_instrument.py @@ -486,7 +486,6 @@ def pixel_resolution(instr): max_eta = min(max_eta, np.min(angps[:, 1])) min_tth = max(min_tth, np.max(angps[:, 0])) min_eta = 
max(min_eta, np.max(angps[:, 1])) - pass med_tth, med_eta = np.median(np.vstack(ang_ps_full), axis=0).flatten() return (min_tth, med_tth, max_tth), (min_eta, med_eta, max_eta) @@ -1163,7 +1162,6 @@ def extract_polar_maps(self, plane_data, imgser_dict, except ValueError: raise RuntimeError(f"hklID '{input_hklID}' is invalid") tth_ranges = tth_ranges[idx] - pass # end of active_hkls handling delta_eta = eta_edges[1] - eta_edges[0] ncols_eta = len(eta_edges) - 1 @@ -1922,7 +1920,6 @@ def pull_spots(self, plane_data, grain_params, tmp > threshold ) patch_data_raw.append(tmp) - pass patch_data_raw = np.stack(patch_data_raw, axis=0) compl.append(contains_signal) @@ -1965,7 +1962,6 @@ def pull_spots(self, plane_data, grain_params, ) else: closest_peak_idx = 0 - pass # end multipeak conditional coms = coms[closest_peak_idx] # meas_omes = \ # ome_edges[0] + (0.5 + coms[0])*delta_ome @@ -2019,10 +2015,8 @@ def pull_spots(self, plane_data, grain_params, ).flatten() # FIXME: why is this suddenly necessary??? meas_xy = meas_xy.squeeze() - pass # end num_peaks > 0 else: patch_data = patch_data_raw - pass # end contains_signal if peak_id < 0: # The peak is invalid. @@ -2046,8 +2040,6 @@ def pull_spots(self, plane_data, grain_params, xyc_arr, ijs, frame_indices, patch_data, ang_centers[i_pt], xy_centers[i_pt], meas_angs, meas_xy) - pass # end conditional on write output - pass # end conditional on check only if return_spot_list: # Full output @@ -2069,12 +2061,9 @@ def pull_spots(self, plane_data, grain_params, ] patch_output.append(_patch_output) iRefl += 1 - pass # end patch conditional - pass # end patch loop output[detector_id] = patch_output if filename is not None and output_format.lower() == 'text': writer.close() - pass # end detector loop if filename is not None and output_format.lower() == 'hdf5': writer.close() return compl, output @@ -2450,7 +2439,6 @@ def __init__(self, image_series_dict, instrument, plane_data, [this_det_ims.omega_to_frame(ome)[0] != -1 for ome in ome_centers] ) - pass # end multi-wedge case # ???: need to pass a threshold? eta_mapping, etas = instrument.extract_polar_maps( @@ -2542,7 +2530,6 @@ def omegas(self): def save(self, filename): xrdutil.EtaOmeMaps.save_eta_ome_maps(self, filename) - pass # end of class: GenerateEtaOmeMaps def _generate_ring_params(tthr, ptth, peta, eta_edges, delta_eta): @@ -2811,7 +2798,6 @@ def _extract_ring_line_positions(iter_args, instr_cfg, panel, eta_tol, npdiv, fit_data.append(result) else: ims_data.append(p_img) - pass # close image loop if not collapse_tth: output = [ang_data, ims_data] if do_fitting: diff --git a/hexrd/material/crystallography.py b/hexrd/material/crystallography.py index 2ab5b1738..63beb532f 100644 --- a/hexrd/material/crystallography.py +++ b/hexrd/material/crystallography.py @@ -1854,7 +1854,6 @@ def getFriedelPair(tth0, eta0, *ome0, **kwargs): ) gHat_l = np.dot(rchi, tmp_gvec[:, i].reshape(3, 1)) tmp_eta[i] = np.arctan2(gHat_l[1], gHat_l[0]) - pass eta_min[goodOnes] = tmp_eta # everybody back to DEGREES! 
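The consolidated _solid_angle_of_triangle in the detector.py hunks above keeps only the numba path of the Van Oosterom-Strackee formula, tan(Omega/2) = <v0, v2 x v1> / (|v0||v1||v2| + cyclic dot-product terms). A quick sanity check of that formula, written as a pure-NumPy transcription so it runs without a numba install (an editor's sketch for illustration, not code from this patch): the triangle spanned by the three coordinate axes covers one octant of the unit sphere, so |Omega| should come out to 4*pi/8 = pi/2.

    import numpy as np

    def solid_angle_of_triangle(vtx_list):
        # Van Oosterom & Strackee: tan(omega/2) = triple product / denominator
        norms = np.sqrt(np.sum(vtx_list * vtx_list, axis=1))
        norms_prod = norms[0] * norms[1] * norms[2]
        stp = np.dot(vtx_list[0], np.cross(vtx_list[2], vtx_list[1]))
        denominator = (
            norms_prod
            + norms[0] * np.dot(vtx_list[1], vtx_list[2])
            + norms[1] * np.dot(vtx_list[2], vtx_list[0])
            + norms[2] * np.dot(vtx_list[0], vtx_list[1])
        )
        return 2.0 * np.arctan2(stp, denominator)

    # the (+x, +y, +z) triangle subtends one octant: |omega| == pi/2
    assert np.isclose(abs(solid_angle_of_triangle(np.eye(3))), np.pi / 2)
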
diff --git a/hexrd/material/material.py b/hexrd/material/material.py index 825af8b17..19ee27b62 100644 --- a/hexrd/material/material.py +++ b/hexrd/material/material.py @@ -233,7 +233,6 @@ def __str__(self): s = 'Material: %s\n' % self.name if self.description: s += ' description: %s\n' % self.description - pass s += ' plane Data: %s' % str(self.planeData) return s @@ -1400,12 +1399,6 @@ def _set_atomdata(self, atomtype, atominfo, U, charge): self._newUnitcell() self.invalidate_structure_factor() - # - # ========== Methods - # - # - pass # end class - # # -----------------------------------------------END CLASS: Material @@ -1506,7 +1499,6 @@ def get_hkl_strings(pdata): if len(sys.argv) == 1: print("need argument: materials.cfg") sys.exit() - pass ml = loadMaterialList(sys.argv[1]) @@ -1514,5 +1506,3 @@ def get_hkl_strings(pdata): print((' from file: ', sys.argv[1])) for m in ml: print(m) - pass - pass diff --git a/hexrd/material/spacegroup.py b/hexrd/material/spacegroup.py index 3efb7212c..1d296d3d5 100644 --- a/hexrd/material/spacegroup.py +++ b/hexrd/material/spacegroup.py @@ -132,10 +132,6 @@ def _set_sgnum(self, v): pglg = _pgDict[k] self._pointGroup = pglg[0] self._laueGroup = pglg[1] - pass - pass - - return sgnum = property(_get_sgnum, _set_sgnum, None, "Space group number") @@ -1299,10 +1295,7 @@ def _buildDict(hstr): n = int(nstr) if n not in d: d[n] = hstr - pass di[hstr] = n - pass - pass return d, di @@ -1926,10 +1919,6 @@ def iroot(n): return int(floor(sqrt(n))) # integer square root l = iroot(rem) if l*l == rem: hkls += [(h, k, l), (h, k, -l)] - pass - pass - pass - pass return hkls @@ -1954,7 +1943,6 @@ def testHKLs(): for hkl in myHKLs: ss = hkl[0]**2 + hkl[1]**2 + hkl[2]**2 print((hkl, ss)) - pass # # 2. Ruby (sg 167) @@ -1968,7 +1956,6 @@ def testHKLs(): for hkl in myHKLs: ss = hkl[0]**2 + hkl[1]**2 + hkl[2]**2 print((hkl, ss)) - pass # # Test Generic HKLs # @@ -1977,9 +1964,6 @@ def testHKLs(): hkls = _getHKLsBySS(ss) print(' number of hkls: ', len(hkls)) print(hkls) - pass - - return if __name__ == '__main__': @@ -2001,7 +1985,3 @@ def testHKLs(): except: print(('failed for space group number: ', n)) print(('Hall symbol: ', lookupHall[n])) - pass - pass - # - pass diff --git a/hexrd/material/symbols.py b/hexrd/material/symbols.py index 6f34afc8d..0119d4ba9 100644 --- a/hexrd/material/symbols.py +++ b/hexrd/material/symbols.py @@ -1259,10 +1259,7 @@ def _buildDict(hstr): hstr = hstr.replace(" ", "") if n not in d: d[n] = hstr - pass di[hstr] = n - pass - pass return d, di diff --git a/hexrd/material/symmetry.py b/hexrd/material/symmetry.py index a6a566eb4..de0347958 100644 --- a/hexrd/material/symmetry.py +++ b/hexrd/material/symmetry.py @@ -30,6 +30,7 @@ # Module containing functions relevant to symmetries import numpy as np +from numba import njit from numpy import array, sqrt, pi, \ vstack, c_, dot, \ argmax @@ -37,7 +38,7 @@ # from hexrd.rotations import quatOfAngleAxis, quatProductMatrix, fixQuat from hexrd import rotations as rot from hexrd import constants -from hexrd.utils.decorators import memoize, numba_njit_if_available +from hexrd.utils.decorators import memoize # ============================================================================= @@ -561,7 +562,7 @@ def GeneratePGSym_Laue(SYM_PG_d): return SYM_PG_d_laue -@numba_njit_if_available(cache=True, nogil=True) +@njit(cache=True, nogil=True) def isnew(mat, sym_mats): for g in sym_mats: diff = np.sum(np.abs(mat - g)) diff --git a/hexrd/material/unitcell.py b/hexrd/material/unitcell.py index 
597e8a776..18f304428 100644 --- a/hexrd/material/unitcell.py +++ b/hexrd/material/unitcell.py @@ -1,5 +1,6 @@ import importlib.resources import numpy as np +from numba import njit from hexrd import constants from hexrd.material import spacegroup, symbols, symmetry from hexrd.ipfcolor import sphere_sector, colorspace @@ -11,8 +12,6 @@ from scipy.interpolate import interp1d import time -from hexrd.utils.decorators import numba_njit_if_available - eps = constants.sqrt_epsf ENERGY_ID = 0 REAL_F1_ID = 1 @@ -25,12 +24,12 @@ ''' calculate dot product of two vectors in any space 'd' 'r' or 'c' ''' -@numba_njit_if_available(cache=True, nogil=True) +@njit(cache=True, nogil=True) def _calclength(u, mat): return np.sqrt(np.dot(u, np.dot(mat, u))) -@numba_njit_if_available(cache=True, nogil=True) +@njit(cache=True, nogil=True) def _calcstar(v, sym, mat): vsym = np.atleast_2d(v) for s in sym: diff --git a/hexrd/matrixutil.py b/hexrd/matrixutil.py index 12569a21b..4d9407e14 100644 --- a/hexrd/matrixutil.py +++ b/hexrd/matrixutil.py @@ -31,13 +31,11 @@ from numpy.linalg import svd from scipy import sparse +import numba +from numba import prange + -from hexrd.utils.decorators import numba_njit_if_available from hexrd import constants -from hexrd.constants import USE_NUMBA -if USE_NUMBA: - import numba - from numba import prange # module variables sqr6i = 1./np.sqrt(6.) @@ -262,42 +260,19 @@ def vecMVCOBMatrix(R): T = np.zeros((nrot, 6, 6), dtype='float64') - T[:, 0, 0] = R[:, 0, 0]**2 - T[:, 0, 1] = R[:, 0, 1]**2 - T[:, 0, 2] = R[:, 0, 2]**2 - T[:, 0, 3] = sqr2 * R[:, 0, 1] * R[:, 0, 2] - T[:, 0, 4] = sqr2 * R[:, 0, 0] * R[:, 0, 2] - T[:, 0, 5] = sqr2 * R[:, 0, 0] * R[:, 0, 1] - T[:, 1, 0] = R[:, 1, 0]**2 - T[:, 1, 1] = R[:, 1, 1]**2 - T[:, 1, 2] = R[:, 1, 2]**2 - T[:, 1, 3] = sqr2 * R[:, 1, 1] * R[:, 1, 2] - T[:, 1, 4] = sqr2 * R[:, 1, 0] * R[:, 1, 2] - T[:, 1, 5] = sqr2 * R[:, 1, 0] * R[:, 1, 1] - T[:, 2, 0] = R[:, 2, 0]**2 - T[:, 2, 1] = R[:, 2, 1]**2 - T[:, 2, 2] = R[:, 2, 2]**2 - T[:, 2, 3] = sqr2 * R[:, 2, 1] * R[:, 2, 2] - T[:, 2, 4] = sqr2 * R[:, 2, 0] * R[:, 2, 2] - T[:, 2, 5] = sqr2 * R[:, 2, 0] * R[:, 2, 1] - T[:, 3, 0] = sqr2 * R[:, 1, 0] * R[:, 2, 0] - T[:, 3, 1] = sqr2 * R[:, 1, 1] * R[:, 2, 1] - T[:, 3, 2] = sqr2 * R[:, 1, 2] * R[:, 2, 2] - T[:, 3, 3] = R[:, 1, 2] * R[:, 2, 1] + R[:, 1, 1] * R[:, 2, 2] - T[:, 3, 4] = R[:, 1, 2] * R[:, 2, 0] + R[:, 1, 0] * R[:, 2, 2] - T[:, 3, 5] = R[:, 1, 1] * R[:, 2, 0] + R[:, 1, 0] * R[:, 2, 1] - T[:, 4, 0] = sqr2 * R[:, 0, 0] * R[:, 2, 0] - T[:, 4, 1] = sqr2 * R[:, 0, 1] * R[:, 2, 1] - T[:, 4, 2] = sqr2 * R[:, 0, 2] * R[:, 2, 2] - T[:, 4, 3] = R[:, 0, 2] * R[:, 2, 1] + R[:, 0, 1] * R[:, 2, 2] - T[:, 4, 4] = R[:, 0, 2] * R[:, 2, 0] + R[:, 0, 0] * R[:, 2, 2] - T[:, 4, 5] = R[:, 0, 1] * R[:, 2, 0] + R[:, 0, 0] * R[:, 2, 1] - T[:, 5, 0] = sqr2 * R[:, 0, 0] * R[:, 1, 0] - T[:, 5, 1] = sqr2 * R[:, 0, 1] * R[:, 1, 1] - T[:, 5, 2] = sqr2 * R[:, 0, 2] * R[:, 1, 2] - T[:, 5, 3] = R[:, 0, 2] * R[:, 1, 1] + R[:, 0, 1] * R[:, 1, 2] - T[:, 5, 4] = R[:, 0, 0] * R[:, 1, 2] + R[:, 0, 2] * R[:, 1, 0] - T[:, 5, 5] = R[:, 0, 1] * R[:, 1, 0] + R[:, 0, 0] * R[:, 1, 1] + for i in range(3): + # Other two i values + i1, i2 = [k for k in range(3) if k != i] + for j in range(3): + # Other two j values + j1, j2 = [k for k in range(3) if k != j] + + T[:, i, j] = R[:, i, j] ** 2 + T[:, i, j + 3] = sqr2 * R[:, i, j1] * R[:, i, j2] + T[:, i + 3, j] = sqr2 * R[:, i1, j] * R[:, i2, j] + T[:, i + 3, j + 3] = ( + R[:, i1, j1] * R[:, i2, j2] + R[:, i1, j2] * R[:, i2, j1] + ) if nrot == 1: T 
= T.squeeze() @@ -562,7 +537,6 @@ def uniqueVectors(v, tol=1.0e-12): indep = np.hstack([True, tmpcmp > tol]) # independent values rowint = indep.cumsum() iv[np.ix_([row], tmpord)] = rowint - pass # # Dictionary sort from bottom up # @@ -577,8 +551,6 @@ def uniqueVectors(v, tol=1.0e-12): if any(ivSrt[:, col] != ivSrt[:, col - 1]): ivInd[nUniq] = col nUniq += 1 - pass - pass return vSrt[:, ivInd[0:nUniq]] @@ -697,7 +669,7 @@ def findDuplicateVectors(vec, tol=vTol, equivPM=False): return eqv2, uid2 -@numba_njit_if_available(cache=True, nogil=True) +@numba.njit(cache=True, nogil=True) def _findduplicatevectors(vec, tol, equivPM): """ Find vectors in an array that are equivalent to within @@ -984,26 +956,16 @@ def solve_wahba(v, w, weights=None): # ============================================================================= -if USE_NUMBA: - @numba.njit(cache=True, nogil=True) - def extract_ijv(in_array, threshold, out_i, out_j, out_v): - n = 0 - w, h = in_array.shape - for i in range(w): - for j in range(h): - v = in_array[i, j] - if v > threshold: - out_i[n] = i - out_j[n] = j - out_v[n] = v - n += 1 - return n -else: # not USE_NUMBA - def extract_ijv(in_array, threshold, out_i, out_j, out_v): - mask = in_array > threshold - n = np.sum(mask) - tmp_i, tmp_j = mask.nonzero() - out_i[:n] = tmp_i - out_j[:n] = tmp_j - out_v[:n] = in_array[mask] - return n +@numba.njit(cache=True, nogil=True) +def extract_ijv(in_array, threshold, out_i, out_j, out_v): + n = 0 + w, h = in_array.shape + for i in range(w): + for j in range(h): + v = in_array[i, j] + if v > threshold: + out_i[n] = i + out_j[n] = j + out_v[n] = v + n += 1 + return n diff --git a/hexrd/projections/spherical.py b/hexrd/projections/spherical.py index 50d7031ea..e0a35fe56 100644 --- a/hexrd/projections/spherical.py +++ b/hexrd/projections/spherical.py @@ -23,7 +23,6 @@ def __init__(self, mapping='stereographic', vector_type='d', self.invert_z = False elif self._vector_type == 'q': self.invert_z = True - pass self._output_dim = output_dim self._rmat = rmat diff --git a/hexrd/resources/pinhole_materials.h5 b/hexrd/resources/pinhole_materials.h5 index 0af1072b8..cfa2e5913 100644 Binary files a/hexrd/resources/pinhole_materials.h5 and b/hexrd/resources/pinhole_materials.h5 differ diff --git a/hexrd/rotations.py b/hexrd/rotations.py index fc3cfcff2..53fabd20e 100644 --- a/hexrd/rotations.py +++ b/hexrd/rotations.py @@ -42,6 +42,7 @@ ) from numpy import float_ as nFloat from numpy import int_ as nInt +from numba import njit from scipy.optimize import leastsq from hexrd import constants as cnst @@ -50,8 +51,6 @@ skewMatrixOfVector, findDuplicateVectors, \ multMatArray, nullSpace -from hexrd.utils.decorators import numba_njit_if_available - # ============================================================================= # Module Data # ============================================================================= @@ -586,7 +585,6 @@ def rotMatOfExpMap_opt(expMap): numObjs = expMap.shape[1] if numObjs == 1: # case of single point W = np.reshape(W, [1, 3, 3]) - pass C1 = np.tile( np.reshape(C1, [numObjs, 1]), @@ -600,8 +598,6 @@ def rotMatOfExpMap_opt(expMap): for i in range(3): for j in range(3): W2[:, i, j] = np.sum(W[:, i, :]*W[:, :, j], 1) - pass - pass rmat = C1*W + C2 * W2 rmat[:, 0, 0] += 1. @@ -677,7 +673,7 @@ def rotMatOfExpMap_orig(expMap): rotMatOfExpMap = rotMatOfExpMap_opt -@numba_njit_if_available(cache=True, nogil=True) +@njit(cache=True, nogil=True) def _rotmatofquat(quat): n = quat.shape[1] # FIXME: maybe preallocate for speed? 
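With the pure-NumPy fallback for extract_ijv removed in the matrixutil.py hunks above, the numba kernel is now the sole implementation. Its calling convention is scatter-style: the caller preallocates worst-case output buffers and slices to the returned count. A usage sketch (the buffer sizes and dtypes here are assumptions for illustration, not taken from the diff):

    import numpy as np
    from scipy import sparse
    from hexrd.matrixutil import extract_ijv

    frame = np.random.rand(512, 512)
    cap = frame.size  # worst case: every pixel clears the threshold
    out_i = np.empty(cap, dtype=np.int64)
    out_j = np.empty(cap, dtype=np.int64)
    out_v = np.empty(cap, dtype=frame.dtype)

    # returns how many entries were written into the buffers
    n = extract_ijv(frame, 0.99, out_i, out_j, out_v)
    spm = sparse.coo_matrix(
        (out_v[:n], (out_i[:n], out_j[:n])), shape=frame.shape
    )
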
@@ -1447,12 +1443,10 @@ def mapAngle(ang, *args, **kwargs): while lbi.sum() > 0: ang[lbi] = ang[lbi] + period lbi = ang < lb - pass ubi = ang > ub while ubi.sum() > 0: ang[ubi] = ang[ubi] - period ubi = ang > ub - pass retval = ang else: retval = mod(ang + 0.5*period, period) - 0.5*period @@ -1923,5 +1917,3 @@ def testRotMatOfExpMap(numpts): dd = np.absolute(d2 - d1) print('maximum difference between results') print(np.max(dd, 0).max()) - - pass diff --git a/hexrd/sampleOrientations/conversions.py b/hexrd/sampleOrientations/conversions.py index a7172c029..2694fe6cc 100644 --- a/hexrd/sampleOrientations/conversions.py +++ b/hexrd/sampleOrientations/conversions.py @@ -1,16 +1,12 @@ import numpy as np +from numba import njit from hexrd import constants -from hexrd.utils.decorators import numba_njit_if_available - -if constants.USE_NUMBA: - from numba import prange -else: - prange = range ap_2 = constants.cuA_2 sc = constants.sc -@numba_njit_if_available(cache=True, nogil=True) + +@njit(cache=True, nogil=True) def getPyramid(xyz): x = xyz[0] y = xyz[1] @@ -34,12 +30,13 @@ def getPyramid(xyz): return 6 -@numba_njit_if_available(cache=True, nogil=True) +@njit(cache=True, nogil=True) def cu2ro(cu): ho = cu2ho(cu) return ho2ro(ho) -@numba_njit_if_available(cache=True, nogil=True) + +@njit(cache=True, nogil=True) def cu2ho(cu): ma = np.max(np.abs(cu)) assert ma <= ap_2, "point outside cubochoric grid" @@ -90,12 +87,14 @@ def cu2ho(cu): elif pyd == 5 or pyd == 6: return np.array([LamXYZ[1], LamXYZ[2], LamXYZ[0]]) -@numba_njit_if_available(cache=True, nogil=True) + +@njit(cache=True, nogil=True) def ho2ro(ho): ax = ho2ax(ho) return ax2ro(ax) -@numba_njit_if_available(cache=True, nogil=True) + +@njit(cache=True, nogil=True) def ho2ax(ho): hmag = np.linalg.norm(ho[:])**2 if hmag < 1E-8: @@ -113,7 +112,8 @@ def ho2ax(ho): else: return np.array([hn[0], hn[1], hn[2], s]) -@numba_njit_if_available(cache=True, nogil=True) + +@njit(cache=True, nogil=True) def ax2ro(ax): if np.abs(ax[3]) < 1E-8: return np.array([0.0, 0.0, 1.0, 0.0]) @@ -124,12 +124,14 @@ def ax2ro(ax): else: return np.array([ax[0], ax[1], ax[2], np.tan(ax[3]*0.5)]) -@numba_njit_if_available(cache=True, nogil=True) + +@njit(cache=True, nogil=True) def ro2qu(ro): ax = ro2ax(ro) return ax2qu(ax) -@numba_njit_if_available(cache=True, nogil=True) + +@njit(cache=True, nogil=True) def ro2ax(ro): if np.abs(ro[3]) < 1E-8: return np.array([0.0, 0.0, 1.0, 0.0]) @@ -141,7 +143,7 @@ def ro2ax(ro): return np.array([ro[0]*mag, ro[1]*mag, ro[2]*mag, ang]) -@numba_njit_if_available(cache=True, nogil=True) +@njit(cache=True, nogil=True) def ax2qu(ro): if np.abs(ro[3]) < 1E-8: return np.array([1.0, 0.0, 0.0, 0.0]) diff --git a/hexrd/sampleOrientations/rfz.py b/hexrd/sampleOrientations/rfz.py index 5131312ec..36556ca4e 100644 --- a/hexrd/sampleOrientations/rfz.py +++ b/hexrd/sampleOrientations/rfz.py @@ -1,20 +1,18 @@ import numpy as np +import numba + from hexrd.constants import FZtypeArray, FZorderArray -from hexrd.utils.decorators import numba_njit_if_available from hexrd import constants -if constants.USE_NUMBA: - from numba import prange -else: - prange = range -@numba_njit_if_available(cache=True, nogil=True) +@numba.njit(cache=True, nogil=True) def getFZtypeandOrder(pgnum): FZtype = FZtypeArray[pgnum-1] FZorder = FZorderArray[pgnum-1] return np.array([FZtype, FZorder]) -@numba_njit_if_available(cache=True, nogil=True) + +@numba.njit(cache=True, nogil=True) def insideCyclicFZ(ro, FZorder): res = False if ro[3] == np.inf: @@ -32,14 +30,15 @@ def 
insideCyclicFZ(ro, FZorder): return res -@numba_njit_if_available(cache=True, nogil=True) + +@numba.njit(cache=True, nogil=True) def insideDihedralFZ(ro, FZorder): if np.abs(ro[3]) >= np.sqrt(3.0): return False else: rod = ro[0:3] * ro[3] - c1 = (np.abs(rod[2]) <= constants.BP[FZorder-1]) + c1 = np.abs(rod[2]) <= constants.BP[FZorder-1] if c1: if FZorder == 2: @@ -78,7 +77,8 @@ def insideDihedralFZ(ro, FZorder): else: return False -@numba_njit_if_available(cache=True, nogil=True) + +@numba.njit(cache=True, nogil=True) def insideCubicFZ(ro, kwrd): rod = np.abs(ro[0:3] * ro[3]) @@ -91,7 +91,8 @@ def insideCubicFZ(ro, kwrd): res = np.logical_and(c1, c2) return res -@numba_njit_if_available(cache=True, nogil=True) + +@numba.njit(cache=True, nogil=True) def insideFZ(ro, pgnum): res = getFZtypeandOrder(pgnum) FZtype = res[0] @@ -116,4 +117,3 @@ def insideFZ(ro, pgnum): return False else: return insideCubicFZ(ro, 'oct') - diff --git a/hexrd/sampleOrientations/sampleRFZ.py b/hexrd/sampleOrientations/sampleRFZ.py index d05482863..27b50fd4b 100644 --- a/hexrd/sampleOrientations/sampleRFZ.py +++ b/hexrd/sampleOrientations/sampleRFZ.py @@ -1,15 +1,13 @@ import numpy as np -from hexrd.utils.decorators import numba_njit_if_available +import numba +from numba import prange + from hexrd.sampleOrientations.conversions import cu2ro, ro2qu from hexrd.sampleOrientations.rfz import insideFZ from hexrd import constants -if constants.USE_NUMBA: - from numba import prange -else: - prange = range -@numba_njit_if_available(cache=True, nogil=True, parallel=True) +@numba.njit(cache=True, nogil=True, parallel=True) def _sample(pgnum, N, delta, @@ -156,6 +154,3 @@ def shift(self): @property def delta(self): return self.ap_2 / self.cubN - - - diff --git a/hexrd/transforms/new_capi/transforms_definitions.py b/hexrd/transforms/new_capi/transforms_definitions.py deleted file mode 100644 index 2fb6df3bd..000000000 --- a/hexrd/transforms/new_capi/transforms_definitions.py +++ /dev/null @@ -1,827 +0,0 @@ -"""This module provides the definitions for the transforms API. It will also -provide a decorator to add to any implementation of the API. This module will -contain the reference comment that will be added to any function that implements -an API function, as well as a means to add pre and post conditions as an -additional way to document the implementations. - -Pre and Post conditions will be in the form of code, there will be means to -execute the scripts forcing those conditions to be evaluated and raise errors if -they are not met. This should always be optional and incur on no overhead unless -enabled, only to be used for debugging and validation purposes. - -Checking of signature definitions has been added. This happens if CHECK_API is -enabled (via the HEXRD_XF_CHECK environment variable). This is implemented via -the signature method in inspect (Python 3.3+). If not available, it falls back -to the backport in funcsigs. If unavailable, CHECK_API is disabled. - -""" -import os -import functools - -# Just a list of the API functions... -# Note this can be kind of redundant with the definition classes, but it also -# allows for some coherence checks. 
-API = ( - "angles_to_gvec", - "angles_to_dvec", - "gvec_to_xy", - "xy_to_gvec", - "solve_omega", - - "gvec_to_rays", - "rays_to_xy_planar", -# "rays_to_xy_cylindrical", - - "angular_difference", - "map_angle", - "row_norm", - "unit_vector", - "make_sample_rmat", - "make_rmat_of_expmap", - "make_binary_rmat", - "make_beam_rmat", - "angles_in_range", - "validate_angle_ranges", - "rotate_vecs_about_axis", - "quat_product_matrix", - "quat_distance" -) - -CHECK_API = os.getenv("XRD_TRANSFORMS_CHECK") -try: - from inspect import signature as get_signature -except ImportError: - try: - from funcsigs import signature as get_signature - except: - import warnings - - warnings.warn("Failed to import from inspect/funcsigs." - "Transforms API signature checking disabled.") - get_signature = None - - -class DEF_Func(object): - """Documentation to use for the function""" - - def _signature(): - """The signature of this method defines the one for the API - including default values.""" - pass - - @classmethod - def _PRECOND(cls, *args, **kwargs): - print("PRECOND (", cls.__class__.__name__,")") - pass - - @classmethod - def _POSTCOND(cls, results, *args, **kwargs): - print("PRECOND (", cls.__class__.__name__,")") - pass - - -# ============================================================================== -# API -# ============================================================================== - -class DEF_angles_to_gvec(DEF_Func): - """ - Takes triplets of angles in the beam frame (2*theta, eta, omega) - to components of unit G-vectors in the LAB frame. If the omega - values are not trivial (i.e. angs[:, 2] = 0.), then the components - are in the SAMPLE frame. If the crystal rmat is specified and - is not the identity, then the components are in the CRYSTAL frame. - - default beam_vec is defined in hexrd.constants.beam_vec - default eta_vec is defined in hexrd.constants.eta_vec - """ - def _signature(angs, - beam_vec=None, - eta_vec=None, - chi=None, - rmat_c=None): - pass - - -class DEF_angles_to_dvec(DEF_Func): - """ - Takes triplets of angles in the beam frame (2*theta, eta, omega) - to components of unit diffraction vectors in the LAB frame. If the - omega values are not trivial (i.e. angs[:, 2] = 0.), then the - components are in the SAMPLE frame. If the crystal rmat is specified - and is not the identity, then the components are in the CRYSTAL frame. - - default beam_vec is defined in hexrd.constants.beam_vec - default eta_vec is defined in hexrd.constants.eta_vec - """ - def _signature(angs, - beam_vec=None, - eta_vec=None, - chi=None, - rmat_c=None): - pass - - -class DEF_gvec_to_xy(DEF_Func): - """Takes a concatenated list of reciprocal lattice vectors components in the - CRYSTAL FRAME to the specified detector-relative frame, subject to the - following: - - 1) it must be able to satisfy a bragg condition - 2) the associated diffracted beam must intersect the detector plane - - Parameters - ---------- - gvec_c : array_like - ([N,] 3) G-vector components in the CRYSTAL FRAME. - rmat_d : array_like - The (3, 3) COB matrix taking components in the - DETECTOR FRAME to the LAB FRAME - rmat_s : array_like - The ([N,] 3, 3) COB matrix taking components in the SAMPLE FRAME to the - LAB FRAME. It may be a single (3, 3) rotation matrix to use for all - gvec_c, or just one rotation matrix per gvec. 
- rmat_c : array_like - The (3, 3) COB matrix taking components in the - CRYSTAL FRAME to the SAMPLE FRAME - tvec_d : array_like - The (3, ) translation vector connecting LAB FRAME to DETECTOR FRAME - tvec_s : array_like - The (3, ) translation vector connecting LAB FRAME to SAMPLE FRAME - tvec_c : array_like - The ([M,] 3, ) translation vector(s) connecting SAMPLE FRAME to - CRYSTAL FRAME - beam_vec : array_like, optional - The (3, ) incident beam propagation vector components in the LAB FRAME; - the default is [0, 0, -1], which is the standard setting. - vmat_inv : array_like, optional - The (3, 3) matrix of inverse stretch tensor components in the - SAMPLE FRAME. The default is None, which implies a strain-free state - (i.e. V = I). - bmat : array_like, optional - The (3, 3) COB matrix taking components in the - RECIPROCAL LATTICE FRAME to the CRYSTAL FRAME; if supplied, it is - assumed that the input `gvecs` are G-vector components in the - RECIPROCL LATTICE FRAME (the default is None, which implies components - in the CRYSTAL FRAME) - - Returns - ------- - array_like - The ([M, ][N, ] 2) array of [x, y] diffracted beam intersections for each - of the N input G-vectors in the DETECTOR FRAME (all Z_d coordinates are - 0 and excluded) and for each of the M candidate positions. For each - input G-vector that cannot satisfy a Bragg condition or intersect the - detector plane, [NaN, Nan] is returned. - - Raises - ------ - AttributeError - The ``Raises`` section is a list of all exceptions - that are relevant to the interface. - ValueError - If `param2` is equal to `param1`. - - Notes - ----- - Previously only a single candidate position was allowed. This is in fact - a vectored version of the previous API function. It is backwards - compatible, as passing single tvec_c is supported and has the same - result. - """ - def _signature(gvec_c, - rmat_d, rmat_s, rmat_c, - tvec_d, tvec_s, tvec_c, - beam_vec=None, - vmat_inv=None, - bmat=None): - pass - - -class DEF_xy_to_gvec(DEF_Func): - """ - Takes a list cartesian (x, y) pairs in the DETECTOR FRAME and calculates - the associated reciprocal lattice (G) vectors and (bragg angle, azimuth) - pairs with respect to the specified beam and azimth (eta) reference - directions. - - Parameters - ---------- - xy_d : array_like - (n, 2) array of n (x, y) coordinates in DETECTOR FRAME - rmat_d : array_like - (3, 3) COB matrix taking components in the - DETECTOR FRAME to the LAB FRAME - rmat_s : array_like - (3, 3) COB matrix taking components in the - SAMPLE FRAME to the LAB FRAME - tvec_d : array_like - (3, ) translation vector connecting LAB FRAME to DETECTOR FRAME - tvec_s : array_like - (3, ) translation vector connecting LAB FRAME to SAMPLE FRAME - tvec_c : array_like - (3, ) translation vector connecting SAMPLE FRAME to CRYSTAL FRAME - rmat_b : array_like, optional - (3, 3) COB matrix taking components in the BEAM FRAME to the LAB FRAME; - defaults to None, which implies the standard setting of identity. 
- distortion : distortion class, optional - Default is None - output_ref : bool, optional - If True, prepends the apparent bragg angle and azimuth with respect to - the SAMPLE FRAME (ignoring effect of non-zero tvec_c) - - Returns - ------- - array_like - (n, 2) ndarray containing the (tth, eta) pairs associated with each - (x, y) associated with gVecs - array_like - (n, 3) ndarray containing the associated G vector directions in the - LAB FRAME - array_like, optional - if output_ref is True - - Notes - ----- - ???: is there a need to flatten the tvec inputs? - ???: include optional wavelength input for returning G with magnitude? - ???: is there a need to check that rmat_b is orthogonal if spec'd? - """ - def _signature(xy_d, - rmat_d, rmat_s, - tvec_d, tvec_s, tvec_c, - rmat_b=None, - distortion=None, - output_ref=False): - pass - - -class DEF_solve_omega(DEF_Func): - """ - For the monochromatic rotation method. - - Solve the for the rotation angle pairs that satisfy the bragg conditions - for an input list of G-vector components. - - Parameters - ---------- - gvecs : array_like - Concatenated triplets of G-vector components in either the - CRYSTAL FRAME or RECIPROCAL FRAME (see optional kwarg `bmat` below). - The shape when cast as a 2-d ndarray is (n, 3), representing n vectors. - chi : float - The inclination angle of the goniometer axis (standard coords) - Rmat_c : array_like - (3, 3) COB matrix taking components in the - CRYSTAL FRAME to the SAMPLE FRAME - wavelength : float - The X-ray wavelength in Angstroms - bmat : array_like, optional - The (3, 3) COB matrix taking components in the - RECIPROCAL LATTICE FRAME to the CRYSTAL FRAME; if supplied, it is - assumed that the input `gvecs` are G-vector components in the - RECIPROCL LATTICE FRAME (the default is None, which implies components - in the CRYSTAL FRAME) - vmat_inv : array_like, optional - The (3, 3) matrix of inverse stretch tensor components in the - SAMPLE FRAME. The default is None, which implies a strain-free state - (i.e. V = I). - rmat_b : array_like, optional - (3, 3) COB matrix taking components in the BEAM FRAME to the LAB FRAME; - defaults to None, which implies the standard setting of identity. - - Returns - ------- - ome0 : array_like - The (n, 3) ndarray containing the feasible (tth, eta, ome) triplets for - each input hkl (first solution) - ome1 : array_like - The (n, 3) ndarray containing the feasible (tth, eta, ome) triplets for - each input hkl (second solution) - - Notes - ----- - The reciprocal lattice vector, G, will satisfy the the Bragg condition - when: - - b.T * G / ||G|| = -sin(theta) - - where b is the incident beam direction (k_i) and theta is the Bragg - angle consistent with G and the specified wavelength. 
The components of - G in the lab frame in this case are obtained using the crystal - orientation, Rc, and the single-parameter oscillation matrix, Rs(ome): - - Rs(ome) * Rc * G / ||G|| - - The equation above can be rearranged to yield an expression of the form: - - a*sin(ome) + b*cos(ome) = c - - which is solved using the relation: - - a*sin(x) + b*cos(x) = sqrt(a**2 + b**2) * sin(x + alpha) - - --> sin(x + alpha) = c / sqrt(a**2 + b**2) - - where: - - alpha = arctan2(b, a) - - The solutions are: - - / - | arcsin(c / sqrt(a**2 + b**2)) - alpha - x = < - | pi - arcsin(c / sqrt(a**2 + b**2)) - alpha - \ - - There is a double root in the case that the reflection is tangent to the - Debye-Scherrer cone (c**2 = a**2 + b**2), and no solution if the - Laue condition cannot be satisfied (filled with NaNs in the results - array here) - """ - def _signature(gvecs, chi, rmat_c, wavelength, - bmat=None, vmat_inv=None, rmat_b=None): - pass - - -class DEF_gvec_to_rays(DEF_Func): - """Takes a concatenated list of reciprocal lattice vector components in the - CRYSTAL FRAME and generates associated diffraction rays, ready to be tested - against detectors. - - Parameters - ---------- - gvec_c : array_like - (N, 3) G-vector components in the CRYSTAL FRAME. - rmat_s : array_like - The ([N,] 3, 3) COB matrix taking components in the SAMPLE FRAME to the - LAB FRAME. It may be a single (3, 3) rotation matrix to use for all - gvec_c, or just one rotation matrix per gvec. - rmat_c : array_like - The (3, 3) COB matrix taking components in the - CRYSTAL FRAME to the SAMPLE FRAME - tvec_s : array_like - The (3, ) translation vector connecting LAB FRAME to SAMPLE FRAME - tvec_c : array_like - The ([M,] 3, ) translation vector(s) connecting SAMPLE FRAME to - CRYSTAL FRAME - beam_vec : array_like, optional - The (3, ) incident beam propagation vector components in the LAB FRAME; - the default is [0, 0, -1], which is the standard setting. - - Returns - ------- - (vectors, origins) - - vectors : array - A (N, 3) array of diffraction vectors in LAB FRAME. These are the ray - directions. G-vectors that won't diffract will result in NaN entries. - origins : array - The ([M,] [N,] 3) array of points acting as origins for the rays. - - Depending on the problem, the origins array may have entries for each - different gvector. This is related to whether each gvec has an associated - rmat_s or not. - - Raises - ------ - ValueError - If array inputs have dimensions that do not match the description. - MemoryError - When result array fails to allocate. - - Notes - ----- - This function is part of the refactor of gvec_to_xy. Using the results - of this function with rays_to_xy_planar should have the same results as - gvec_to_xy. - """ - def _signature(gvec_c, rmat_s, rmat_c, tvec_s, tvec_c, beam_vec=None): - pass - - -class DEF_rays_to_xy_planar(DEF_Func): - """Compute (x,y) coordinates of the intersection of rays with a planar - detector. - - Parameters - ---------- - vectors : array_like - (N, 3) array of vectors to use as ray directions. - - origins : array_like - ([M,] [N, ] 3) array of points to use as ray origins. - - rmat_d : array_like - (3, 3) COB matrix taking from DETECTOR FRAME to LAB FRAME. - - tvec_d : array_like - (3,) position of the detector, in LAB FRAME. - - origin_per_vector : boolean - If True, there will be an 'N' dimension in the origin points. That is, - each vector will have its own origin point. If False, no 'N' dimensions - are present, so a single origin point will be shared by the vectors.
- Returns - ------- - array_like: - (M, N, 2) array with the parametric (x,y) coordinates in the detector - plane for each (m, n) ray. An (m, n) ray is formed with the vector - vectors[n] and the point origins[m] if origins is (M, 3) or the point - origins[m,n] if origins is (M, N, 3). - - Depending on the problem, the origins array may have entries for each - different gvector. This is related to whether each gvec has an associated - rmat_s or not. - - Raises - ------ - ValueError - If array inputs have dimensions that do not match the description. - MemoryError - When result array fails to allocate. - - Notes - ----- - This function is part of the refactor of gvec_to_xy. Using the results - of gvec_to_rays with this function should return the same results as - gvec_to_xy. - - The 'origin_per_vector' is required to disambiguate the case of having - a (X, 3) vector array and an (X, 3) origin array, which could either mean - "N=X, M not present, one origin per vector" or "N=X, M=X, reuse points - for the vectors". 'origin_per_vector' basically says that the origins - array has shape ([M,], N, 3) if there is an N. - - """ - def _signature(vectors, origins, rmat_d, tvec_d, origin_per_vector=False): - pass - - -# ============================================================================== -# UTILITY FUNCTIONS API -# ============================================================================== - -class DEF_angular_difference(DEF_Func): - """ - Do the proper (acute) angular difference in the context of a branch cut. - - *) Default angular range is [-pi, pi] - """ - def _signature(ang_list0, ang_list1, units=None): - pass - - -class DEF_map_angle(DEF_Func): - """ - Utility routine to map an angle into a specified period - - actual function is map_angle(ang[, range], units=None). - range is optional and defaults to the appropriate angle for the unit - centered on 0. - - accepted units are: 'radians' and 'degrees' - """ - - def _signature(ang, range=None, units=None): - pass - - -class DEF_row_norm(DEF_Func): - """ - Compute the norm of row vectors. - - guaranteed to work for 1d and 2d arrays. - - note: numpy.linalg.norm could be used instead as this is equivalent to - numpy.linalg.norm(vec_in, axis=1) - """ - def _signature(vec_in): - pass - - -class DEF_unit_vector(DEF_Func): - """ - Normalize an array of row vectors (vstacked, axis=0) - For vectors with (very close to) zero norm, the original - vector is returned. - - guaranteed to work for 1d and 2d arrays. - """ - def _signature(vec_in): - pass - - -class DEF_make_sample_rmat(DEF_Func): - """ - Make SAMPLE frame rotation matrices as composition of - rotation of ome about the axis - - [0., cos(chi), sin(chi)] - - in the LAB frame. - - Parameters - ---------- - chi : float - The inclination angle of the goniometer axis (standard coords) - ome : array_like - (n) angles to generate rotation matrices from. - - Returns - ------- - array_like - (n, 3, 3) a vector of the n rotation matrices along the - axis defined by chi, one for each ome. - """ - def _signature(chi, ome): - pass - - -class DEF_make_rmat_of_expmap(DEF_Func): - """ - Calculates the rotation matrix from an exponential map - - Parameters - ---------- - exp_map: array_like - (3,) exp_map to generate the rotation matrix from.
- - Returns - ------- - array_like - (3,3) the associated rotation matrix - """ - def _signature(exp_map): - pass - - -class DEF_make_binary_rmat(DEF_Func): - """ - make a binary rotation matrix about the specified axis - - Used to compute the diffracted vector wrt the axis. - - Parameters - ---------- - axis: array_like - (3,) axis to use to generate the rotation matrix - - Returns - ------- - array_like - (3, 3) the resulting rotation matrix - """ - def _signature(axis): - pass - - -class DEF_make_beam_rmat(DEF_Func): - """ - make eta basis COB matrix with beam antiparallel with Z - - takes components from BEAM frame to LAB - - Parameters - ---------- - bvec_l: array_like - (3,) incident beam vector in LAB frame - evec_l: array_like - (3,) eta vector to form the COB matrix - - Returns - ------- - array - (3, 3) the resulting COB matrix - """ - def _signature(bvec_l, evec_l): - pass - - -class DEF_angles_in_range(DEF_Func): - """Determine whether angles lie in or out of specified ranges - - *angles* - a list/array of angles - *starts* - a list of range starts - *stops* - a list of range stops - - OPTIONAL ARGS: - *degrees* - [True] angles & ranges in degrees (or radians) - """ - def _signature(angles, starts, stops, degrees=True): - pass - - -class DEF_validate_angle_ranges(DEF_Func): - """ - Determine whether angles lie in or out of a set of ranges. - - Parameters - ---------- - ang_list: array_like - (n,) angles to check - start_angs: array_like - (m,) start of the angle spans to check - stop_angs: array_like - (m,) end of the angle spans to check - ccw: boolean - True if the check is to be performed counter-clockwise. False to check - clockwise - - Returns - ------- - array - (n,) array of booleans indicating the angles that pass - the test. - - Notes - ----- - - Each angle is checked against all the angle spans. The angles are normalized - into the [-pi,pi[ range. As start/stop in a circumference actually defines - two ranges, the ccw flag is used to choose which one to use. - - For example, a range [0, pi[ would include 0.5*pi if using - counter-clockwise, but not when using clockwise. In the same - way, -0.5*pi would be included when using clockwise, but not when using - counter-clockwise. - - In the case that start and end have the same value, it is considered that - all the angles are included. - """ - def _signature(ang_list, start_angs, stop_angs, ccw=True): - pass - - -class DEF_rotate_vecs_about_axis(DEF_Func): - """ - Rotate vectors about an axis - - Parameters - ---------- - angle: array_like - ([n,]) angle(s) to rotate. - axis: array_like - ([n,] 3) normalized vector(s) to rotate about. - vecs: array_like - (n, 3) vector(s) to rotate. - - Returns - ------- - array - rotated vectors. - - Notes - ----- - Operations are made one by one. The [n,] dimension, if present, - must match for all arguments using it. - - - Quaternion formula: - if we split v into parallel and perpendicular components w.r.t.
the - axis of quaternion q, - - v = a + n - - then the action of rotating the vector dot(R(q), v) becomes - - v_rot = (q0**2 - |q|**2)(a + n) + 2*dot(q, a)*q + 2*q0*cross(q, n) - - """ - def _signature(angle, axis, vecs): - pass - - -class DEF_quat_product_matrix(DEF_Func): - """ - Form 4 x 4 array to perform the quaternion product - - USAGE - qmat = quatProductMatrix(q, mult='right') - - INPUTS - 1) q is (4,), an iterable representing a unit quaternion - horizontally concatenated - 2) mult is a keyword arg, either 'left' or 'right', denoting - the sense of the multiplication: - - / quatProductMatrix(h, mult='right') * q - q * h --> < - \ quatProductMatrix(q, mult='left') * h - - OUTPUTS - 1) qmat is (4, 4), the left or right quaternion product - operator - - NOTES - *) This function is intended to replace a cross-product based - routine for products of quaternions with large arrays of - quaternions (e.g. applying symmetries to a large set of - orientations). - """ - def _signature(q, mult='right'): - pass - - -class DEF_quat_distance(DEF_Func): - """ - Find the distance between two unit quaternions under symmetry group. - - Parameters - ---------- - q1: array_like - (4,) first quaternion for distance computation - q2: array_like - (4,) second quaternion for distance computation - qsym: array_like - (4, N) quaternions defining the N symmetries to compute distances - - Returns - ------- - double - the resulting distance of the quaternions - - Notes - ----- - The quaternions are expected to be (4,) arrays, where the real part (w) is - at index 0, while the imaginary parts (i, j, k) are at indices 1, 2, 3 - respectively. - - For example, the identity quaternion could be built by: - numpy.r_[1.0, 0.0, 0.0, 0.0] - - Also note that the quaternions specifying the symmetries are expected in - column-major order. - """ - def _signature(q1, q2, qsym): - pass - - -# ============================================================================== -# Decorator to mark implementations of the API. Names must match. -# ============================================================================== - -def xf_api(f, name=None): - """decorator to apply to the entry points of the transforms module""" - api_call = name if name is not None else f.__name__ - - if api_call not in API: - raise RuntimeError("'%s' is not part of the transforms API." % api_call) - - try: - fn_def = globals()['DEF_'+api_call] - except KeyError: - # This happens if there is no definition for the decorated function - raise RuntimeError("'%s' definition not found." % api_call) - - try: - # python 2 - _string_type = basestring - except NameError: - # This will happen on python 3 - _string_type = str - - try: - if not (isinstance(fn_def.__doc__, _string_type) and - callable(fn_def._PRECOND) and - callable(fn_def._POSTCOND) and - callable(fn_def._signature)): - raise Exception() - except Exception: - # A valid definition requires a string doc, and callable _PRECOND, - # _POSTCOND and _signature. - # - # __doc__ will become the decorated function's documentation. - # _PRECOND will be run on every call with args and kwargs - # _POSTCOND will be run on every call with result, args and kwargs - # _signature will be used to enforce a signature on implementations. - # - # _PRECOND and _POSTCOND will only be called if CHECK_API is enabled, - # as they will slow down execution. - raise RuntimeError("'{0}' definition error.".format(api_call)) - - # Sanity check: make sure the decorated function has the expected signature.
- if get_signature is not None: - # Check that the function has the right signature - if get_signature(fn_def._signature) != get_signature(f): - raise RuntimeError("'{0}' signature mismatch.".format(api_call)) - - # At this point use a wrapper that calls pre and post conditions if checking - # is enabled, otherwise leave the function "as is". - if CHECK_API: - @functools.wraps(f, assigned={"__doc__": fn_def.__doc__}) - def wrapper(*args, **kwargs): - fn_def._PRECOND(*args, **kwargs) - result = f(*args, **kwargs) - fn_def._POSTCOND(result, *args, **kwargs) - return result - - return wrapper - else: - # just try to put the right documentation on the function - try: - f.__doc__ = fn_def.__doc__ - except Exception: - pass - return f diff --git a/hexrd/transforms/new_capi/xf_new_capi.py b/hexrd/transforms/new_capi/xf_new_capi.py index 1372d9af0..4e0280680 100644 --- a/hexrd/transforms/new_capi/xf_new_capi.py +++ b/hexrd/transforms/new_capi/xf_new_capi.py @@ -24,13 +24,11 @@ - homochoricOfQuat """ from . import constants as cnst -from .transforms_definitions import xf_api from hexrd.extensions import _new_transforms_capi as _impl import numpy as np -@xf_api def angles_to_gvec(angs, beam_vec=None, eta_vec=None, chi=None, rmat_c=None): """ @@ -87,7 +85,6 @@ def angles_to_gvec(angs, beam_vec=None, eta_vec=None, chi=None, rmat_c=None): return result[0] if orig_ndim == 1 else result -@xf_api def angles_to_dvec(angs, beam_vec=None, eta_vec=None, chi=None, rmat_c=None): """ @@ -163,7 +160,6 @@ def makeGVector(hkl, bMat): return unitVector(np.dot(bMat, hkl)) -@xf_api def gvec_to_xy( gvec_c, rmat_d, @@ -260,7 +256,6 @@ def gvec_to_xy( return result[0] if orig_ndim == 1 else result -@xf_api def xy_to_gvec( xy_d, rmat_d, @@ -342,7 +337,6 @@ def xy_to_gvec( ) -# @xf_api def oscillAnglesOfHKLs( hkls, chi, @@ -368,7 +362,6 @@ def oscillAnglesOfHKLs( ) -@xf_api def unit_vector(vec_in): """ Normalize the input vector(s) to unit length. @@ -400,7 +393,6 @@ def unit_vector(vec_in): ) -# @xf_api def makeDetectorRotMat(tiltAngles): arg = np.ascontiguousarray(np.r_[tiltAngles].flatten()) return _impl.makeDetectorRotMat(arg) @@ -410,7 +402,6 @@ def makeDetectorRotMat(tiltAngles): # and makeOscillRotMatArray... -# @xf_api def make_oscill_rot_mat(oscillAngles): chi, ome = oscillAngles ome = np.atleast_1d(ome) @@ -418,13 +409,11 @@ def make_oscill_rot_mat(oscillAngles): return result.reshape((3, 3)) -# @xf_api def make_oscill_rot_mat_array(chi, omeArray): arg = np.ascontiguousarray(omeArray) return _impl.makeOscillRotMat(chi, arg) -@xf_api def make_sample_rmat(chi, ome): # TODO: Check this docstring """ @@ -458,7 +447,6 @@ def make_sample_rmat(chi, ome): return result -@xf_api def make_rmat_of_expmap(exp_map): """ Calculate the rotation matrix of an exponential map. @@ -477,7 +465,6 @@ def make_rmat_of_expmap(exp_map): return _impl.makeRotMatOfExpMap(arg) -@xf_api def make_binary_rmat(axis): # TODO: Make this docstring. 
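# NOTE (editor's sketch, not from the original sources): for readers following
# the @xf_api removals in this hunk, this is a minimal, hypothetical rendering
# of the pre-/post-condition wrapping that xf_api performed. The names
# CHECK_API, _PRECOND and _POSTCOND mirror the definition objects above; the
# `checked` helper itself is illustrative only.
import functools

CHECK_API = True  # assumed module-level flag, as in transforms_definitions

def checked(defn):
    """Wrap an implementation with defn's pre/post condition callables."""
    def decorator(f):
        if not CHECK_API:
            return f  # no-op when checking is disabled
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            defn._PRECOND(*args, **kwargs)             # validate inputs
            result = f(*args, **kwargs)
            defn._POSTCOND(result, *args, **kwargs)    # validate output
            return result
        return wrapper
    return decorator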
@@ -485,7 +472,6 @@ def make_binary_rmat(axis): return _impl.makeBinaryRotMat(arg) -@xf_api def make_beam_rmat(bvec_l, evec_l): """ Creates a COB matrix from the beam frame to the lab frame @@ -505,7 +491,6 @@ def make_beam_rmat(bvec_l, evec_l): return _impl.makeEtaFrameRotMat(arg1, arg2) -@xf_api def validate_angle_ranges(ang_list, start_angs, stop_angs, ccw=True): """ Find out if angles are in the CCW or CW range from start to stop @@ -535,7 +520,6 @@ def validate_angle_ranges(ang_list, start_angs, stop_angs, ccw=True): return _impl.validateAngleRanges(ang_list, start_angs, stop_angs, ccw) -@xf_api def rotate_vecs_about_axis(angle, axis, vecs): """ Rotate vectors about an axis @@ -561,7 +545,6 @@ def rotate_vecs_about_axis(angle, axis, vecs): return result.T -@xf_api def quat_distance(q1, q2, qsym): """ Distance between two quaternions, taking quaternions of symmetry into diff --git a/hexrd/transforms/new_capi/xf_numba.py b/hexrd/transforms/new_capi/xf_numba.py deleted file mode 100644 index f03175450..000000000 --- a/hexrd/transforms/new_capi/xf_numba.py +++ /dev/null @@ -1,452 +0,0 @@ -#! /usr/bin/env python -# ============================================================================= -# Copyright (c) 2012, Lawrence Livermore National Security, LLC. -# Produced at the Lawrence Livermore National Laboratory. -# Written by Joel Bernier and others. -# LLNL-CODE-529294. -# All rights reserved. -# -# This file is part of HEXRD. For details on downloading the source, -# see the file COPYING. -# -# Please also see the file LICENSE. -# -# This program is free software; you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License (as published by the Free -# Software Foundation) version 2.1 dated February 1999. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY -# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this program (see file LICENSE); if not, write to -# the Free Software Foundation, Inc., 59 Temple Place, Suite 330, -# Boston, MA 02111-1307 USA or visit . -# ============================================================================= -# ??? do we want to set np.seterr(invalid='ignore') to avoid nan warnings? -# -*- coding: utf-8 -*- - -"""Transforms module implementation using numba. - -Currently, this implementation contains code for the following functions: - -- angles_to_gvec -- angles_to_dvec - -- row_norm -- unit_vector -- make_rmat_of_expmap -- make_beam_rmat """ -import numpy as np -from numpy import float_ as npfloat -from numpy import int_ as npint - -from . import constants as cnst -from .transforms_definitions import xf_api, get_signature -from .xf_numpy import _beam_to_crystal - -try: - import numba -except ImportError: - # Numba is an optional dependency. Any code relying on numba should be - # optional - raise ImportError("xf_numba not available: numba not installed") - -# Use the following decorator instead of numba.jit for interface functions. -# This is so we can patch certain features.
-def xfapi_jit(fn): - out = numba.jit(fn) - out.__signature__ = get_signature(fn) - - return out - - -@numba.njit -def _angles_to_gvec_helper(angs, out=None): - """ - angs are vstacked [2*theta, eta, omega], although omega is optional - - This should be equivalent to the one-liner numpy version: - out = np.vstack([[np.cos(0.5*angs[:, 0]) * np.cos(angs[:, 1])], - [np.cos(0.5*angs[:, 0]) * np.sin(angs[:, 1])], - [np.sin(0.5*angs[:, 0])]]) - - although much faster - """ - count, dim = angs.shape - out = out if out is not None else np.empty((count, 3), dtype=angs.dtype) - - for i in range(count): - ca0 = np.cos(0.5*angs[i, 0]) - sa0 = np.sin(0.5*angs[i, 0]) - ca1 = np.cos(angs[i, 1]) - sa1 = np.sin(angs[i, 1]) - out[i, 0] = ca0 * ca1 - out[i, 1] = ca0 * sa1 - out[i, 2] = sa0 - - return out - - -@numba.njit -def _angles_to_dvec_helper(angs, out=None): - """ - angs are vstacked [2*theta, eta, omega], although omega is optional - - This should be equivalent to the one-liner numpy version: - out = np.vstack([[np.sin(angs[:, 0]) * np.cos(angs[:, 1])], - [np.sin(angs[:, 0]) * np.sin(angs[:, 1])], - [-np.cos(angs[:, 0])]]) - - although much faster - """ - count, dim = angs.shape - out = out if out is not None else np.empty((count, 3), dtype=angs.dtype) - for i in range(count): - ca0 = np.cos(angs[i, 0]) - sa0 = np.sin(angs[i, 0]) - ca1 = np.cos(angs[i, 1]) - sa1 = np.sin(angs[i, 1]) - out[i, 0] = sa0 * ca1 - out[i, 1] = sa0 * sa1 - out[i, 2] = -ca0 - - return out - -@numba.njit -def _rmat_s_helper(chi=None, omes=None, out=None): - """ - simple utility for calculating sample rotation matrices based on - standard definition for HEDM - - chi is a single value, 0.0 by default - omes is either a 1d array or None. - If None the code should be equivalent to a single ome of value 0.0 - - out is a preallocated output array. No check is done about it having the - proper size. If None a new array will be allocated. The expected size - of the array is as many 3x3 matrices as omes (n, 3, 3). - """ - if chi is not None: - cx = np.cos(chi) - sx = np.sin(chi) - else: - cx = 1.0 - sx = 0.0 - - if omes is not None: - # omes is an array (vector): output is as many rotation matrices as omes entries. - n = len(omes) - out = out if out is not None else np.empty((n,3,3), dtype=omes.dtype) - - if chi is not None: - # ome is array and chi is a value... compute output - cx = np.cos(chi) - sx = np.sin(chi) - for i in range(n): - cw = np.cos(omes[i]) - sw = np.sin(omes[i]) - out[i, 0, 0] = cw; out[i, 0, 1] = 0.; out[i, 0, 2] = sw - out[i, 1, 0] = sx*sw; out[i, 1, 1] = cx; out[i, 1, 2] = -sx*cw - out[i, 2, 0] = -cx*sw; out[i, 2, 1] = sx; out[i, 2, 2] = cx*cw - else: - # omes is array and chi is None -> equivalent to chi=0.0, but shortcut computations. - # cx IS 1.0, sx IS 0.0 - for i in range(n): - cw = np.cos(omes[i]) - sw = np.sin(omes[i]) - out[i, 0, 0] = cw; out[i, 0, 1] = 0.; out[i, 0, 2] = sw - out[i, 1, 0] = 0.; out[i, 1, 1] = 1.; out[i, 1, 2] = 0. - out[i, 2, 0] = -sw; out[i, 2, 1] = 0.; out[i, 2, 2] = cw - else: - # omes is None, results should be equivalent to an array with a single element 0.0 - out = out if out is not None else np.empty((1, 3, 3)) - if chi is not None: - # ome is 0.0. cw is 1.0 and sw is 0.0 - cx = np.cos(chi) - sx = np.sin(chi) - out[0, 0, 0] = 1.; out[0, 0, 1] = 0.; out[0, 0, 2] = 0. - out[0, 1, 0] = 0.; out[0, 1, 1] = cx; out[0, 1, 2] = -sx - out[0, 2, 0] = 0.; out[0, 2, 1] = sx; out[0, 2, 2] = cx - else: - # both omes and chi are None... return a single identity matrix.
- out[0, 0, 0] = 1.; out[0, 0, 1] = 0.; out[0, 0, 2] = 0. - out[0, 1, 0] = 0.; out[0, 1, 1] = 1.; out[0, 1, 2] = 0. - out[0, 2, 0] = 0.; out[0, 2, 1] = 0.; out[0, 2, 2] = 1. - - - return out - - -@xf_api -def angles_to_gvec(angs, - beam_vec=None, eta_vec=None, - chi=None, rmat_c=None): - """Note about this implementation: - This used to take rmat_b instead of the pair beam_vec, eta_vec. So it may require - some checking. - """ - orig_ndim = angs.ndim - angs = np.atleast_2d(angs) - nvecs, dim = angs.shape - - # make vectors in beam frame - gvec_b = _angles_to_gvec_helper(angs[:,0:2]) - - # _rmat_s_helper could return None to mean "Identity" when chi and ome are None. - omes = angs[:, 2] if dim > 2 else None - if chi is not None or omes is not None: - rmat_s = _rmat_s_helper(chi=chi, omes=omes) - else: - rmat_s = None - - # apply defaults to beam_vec and eta_vec. - # TODO: use a default rmat when beam_vec and eta_vec are None so computations - # can be avoided? - beam_vec = beam_vec if beam_vec is not None else cnst.beam_vec - eta_vec = eta_vec if eta_vec is not None else cnst.eta_vec - rmat_b = make_beam_rmat(beam_vec, eta_vec) - - out = _beam_to_crystal(gvec_b, - rmat_b=rmat_b, rmat_s=rmat_s, rmat_c=rmat_c) - return out[0] if orig_ndim == 1 else out - - -@xf_api -def angles_to_dvec(angs, - beam_vec=None, eta_vec=None, - chi=None, rmat_c=None): - """Note about this implementation: - - This used to take rmat_b instead of the pair beam_vec, eta_vec. So it may - require some checking. - """ - angs = np.atleast_2d(angs) - nvecs, dim = angs.shape - - # make vectors in beam frame - dvec_b = _angles_to_dvec_helper(angs[:,0:2]) - - # calculate rmat_s - omes = angs[:, 2] if dim>2 else None - if chi is not None or omes is not None: - rmat_s = _rmat_s_helper(chi=chi, omes=omes) - else: - rmat_s = None - - # apply defaults to beam_vec and eta_vec. - # TODO: use a default rmat when beam_vec and eta_vec are None so computations - # can be avoided? - beam_vec = beam_vec if beam_vec is not None else cnst.beam_vec - eta_vec = eta_vec if eta_vec is not None else cnst.eta_vec - rmat_b = make_beam_rmat(beam_vec, eta_vec) - - return _beam_to_crystal(dvec_b, - rmat_b=rmat_b, rmat_s=rmat_s, rmat_c=rmat_c) - - -# this could be a gufunc... (n)->() -@numba.njit -def _row_norm(a, out=None): - n, dim = a.shape - out = out if out is not None else np.empty(n, dtype=a.dtype) - for i in range(n): - nrm = 0.0 - for j in range(dim): - x = a[i, j] - nrm += x*x - out[i] = np.sqrt(nrm) - - return out - - -# this and _unit_vector_single would be better as a gufunc. -@numba.njit -def _unit_vector_single(a, out=None): - out = out if out is not None else np.empty_like(a) - - n = len(a) - sqr_norm = a[0]*a[0] - for i in range(1, n): - sqr_norm += a[i]*a[i] - - # prevent divide by zero - if sqr_norm > cnst.epsf: - recip_norm = 1.0 / np.sqrt(sqr_norm) - out[:] = a[:] * recip_norm - else: - out[:] = a[:] - - return out - - -@numba.njit -def _unit_vector_multi(a, out=None): - out = out if out is not None else np.empty_like(a) - - n, dim = a.shape - for i in range(n): - #_unit_vector_single(a[i], out=out[i]) - sqr_norm = a[i, 0] * a[i, 0] - - for j in range(1, dim): - sqr_norm += a[i, j]*a[i, j] - - if sqr_norm > cnst.epsf: - recip_norm = 1.0 / np.sqrt(sqr_norm) - out[i,:] = a[i,:] * recip_norm - else: - out[i,:] = a[i,:] - - return out - -@xf_api -def row_norm(vec_in): - """ - return row-wise norms for a list of vectors - """ - # TODO: leave this to a PRECONDITION in the xf_api?
- if vec_in.ndim == 1: - out = _row_norm(np.atleast_2d(vec_in))[0] - elif vec_in.ndim == 2: - out = _row_norm(vec_in) - else: - raise ValueError( - "incorrect shape: arg must be 1-d or 2-d, yours is %d" - % (len(vec_in.shape))) - - return out - - -@xf_api -def unit_vector(vec_in): - """ - normalize array of column vectors (hstacked, axis = 0) - """ - if vec_in.ndim == 1: - out = _unit_vector_single(vec_in) - elif vec_in.ndim == 2: - out = _unit_vector_multi(vec_in) - else: - raise ValueError( - "incorrect arg shape; must be 1-d or 2-d, yours is %d-d" - % (vec_in.ndim) - ) - return out - - -@numba.njit -def _make_rmat_of_expmap(x, out=None): - """ - TODO: - - Test effectiveness of two options: - - 1) avoid conditional inside for loop and use np.divide to return NaN - for the phi = 0 cases, and deal with it later; or - 2) catch phi = 0 cases inside the loop and just return squeezed answer - """ - n = len(x) - out = out if out is not None else np.empty((n,3,3), dtype=x.dtype) - for i in range(n): - phi = np.sqrt(x[i, 0]*x[i, 0] + x[i, 1]*x[i, 1] + x[i, 2]*x[i, 2]) - if phi <= cnst.sqrt_epsf: - out[i, 0, 0] = 1.; out[i, 0, 1] = 0.; out[i, 0, 2] = 0. - out[i, 1, 0] = 0.; out[i, 1, 1] = 1.; out[i, 1, 2] = 0. - out[i, 2, 0] = 0.; out[i, 2, 1] = 0.; out[i, 2, 2] = 1. - else: - f1 = np.sin(phi)/phi - f2 = (1. - np.cos(phi)) / (phi*phi) - - out[i, 0, 0] = 1. - f2*(x[i, 2]*x[i, 2] + x[i, 1]*x[i, 1]) - out[i, 0, 1] = f2*x[i, 1]*x[i, 0] - f1*x[i, 2] - out[i, 0, 2] = f1*x[i, 1] + f2*x[i, 2]*x[i, 0] - - out[i, 1, 0] = f1*x[i, 2] + f2*x[i, 1]*x[i, 0] - out[i, 1, 1] = 1. - f2*(x[i, 2]*x[i, 2] + x[i, 0]*x[i, 0]) - out[i, 1, 2] = f2*x[i, 2]*x[i, 1] - f1*x[i, 0] - - out[i, 2, 0] = f2*x[i, 2]*x[i, 0] - f1*x[i, 1] - out[i, 2, 1] = f1*x[i, 0] + f2*x[i, 2]*x[i, 1] - out[i, 2, 2] = 1. - f2*(x[i, 1]*x[i, 1] + x[i, 0]*x[i, 0]) - - return out - - -""" -if the help above was set up to return nans... - -def make_rmat_of_expmap(exp_map): - exp_map = np.atleast_2d(exp_map) - rmats = np.empty((len(exp_map), 3, 3)) - _make_rmat_of_expmap(exp_map, rmats) - chk = np.isnan(rmats) - if np.any(chk): - rmats[chk] = np.tile( - [1., 0., 0., 0., 1., 0., 0., 0., 1.], np.sum(chk)/9 - ) - return rmats -""" - -@xf_api -def make_rmat_of_expmap(exp_map): - exp_map = np.atleast_2d(exp_map) - rmats = _make_rmat_of_expmap(exp_map) - return np.squeeze(rmats) - - -@xf_api -@xfapi_jit -def make_beam_rmat(bvec_l, evec_l): - # bvec_l and evec_l CANNOT have 0 magnitude! - # must catch this case as well as colinear bhat_l/ehat_l elsewhere... 
- bvec_mag = np.sqrt(bvec_l[0]**2 + bvec_l[1]**2 + bvec_l[2]**2) - - if bvec_mag < cnst.sqrt_epsf: - raise RuntimeError("bvec_l MUST NOT be ZERO!") - - # assign Ze as -bhat_l - Ze0 = -bvec_l[0] / bvec_mag - Ze1 = -bvec_l[1] / bvec_mag - Ze2 = -bvec_l[2] / bvec_mag - - # find Ye as Ze ^ ehat_l - Ye0 = Ze1*evec_l[2] - evec_l[1]*Ze2 - Ye1 = Ze2*evec_l[0] - evec_l[2]*Ze0 - Ye2 = Ze0*evec_l[1] - evec_l[0]*Ze1 - - Ye_mag = np.sqrt(Ye0**2 + Ye1**2 + Ye2**2) - if Ye_mag < cnst.sqrt_epsf: - raise RuntimeError("bvec_l and evec_l MUST NOT be collinear!") - - out = np.empty((3,3), dtype=bvec_l.dtype) - Ye0 /= Ye_mag - Ye1 /= Ye_mag - Ye2 /= Ye_mag - - # find Xe as Ye ^ Ze - Xe0 = Ye1*Ze2 - Ze1*Ye2 - Xe1 = Ye2*Ze0 - Ze2*Ye0 - Xe2 = Ye0*Ze1 - Ze0*Ye1 - - - out[0, 0] = Xe0 - out[0, 1] = Ye0 - out[0, 2] = Ze0 - - out[1, 0] = Xe1 - out[1, 1] = Ye1 - out[1, 2] = Ze1 - - out[2, 0] = Xe2 - out[2, 1] = Ye2 - out[2, 2] = Ze2 - - return out - - diff --git a/hexrd/transforms/new_capi/xf_numpy.py b/hexrd/transforms/new_capi/xf_numpy.py deleted file mode 100644 index d32b18466..000000000 --- a/hexrd/transforms/new_capi/xf_numpy.py +++ /dev/null @@ -1,1107 +0,0 @@ -#! /usr/bin/env python -# ============================================================================= -# Copyright (c) 2012, Lawrence Livermore National Security, LLC. -# Produced at the Lawrence Livermore National Laboratory. -# Written by Joel Bernier and others. -# LLNL-CODE-529294. -# All rights reserved. -# -# This file is part of HEXRD. For details on downloading the source, -# see the file COPYING. -# -# Please also see the file LICENSE. -# -# This program is free software; you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License (as published by the Free -# Software Foundation) version 2.1 dated February 1999. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY -# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this program (see file LICENSE); if not, write to -# the Free Software Foundation, Inc., 59 Temple Place, Suite 330, -# Boston, MA 02111-1307 USA or visit . -# ============================================================================= - -# ??? do we want to set np.seterr(invalid='ignore') to avoid nan warnings? -import numpy as np -from numpy import float_ as npfloat -from numpy import int_ as npint - -from . import constants as cnst -from .transforms_definitions import xf_api - -# ============================================================================= -# HELPER FUNCTIONS -# ============================================================================= - -# ???: quicker to use conditionals below or just put in the identity matrix -# and multiply through?
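# NOTE (editor's sketch, not from the original sources): the helper below
# chains change-of-basis matrices using the row-vector convention. For a
# single column vector v the LAB components are rmat @ v; for row-stacked
# vectors V of shape (n, 3) the equivalent operation is V @ rmat.T, which is
# exactly what _beam_to_crystal applies. Values here are illustrative only.
import numpy as np

rng = np.random.default_rng(0)
V = rng.normal(size=(5, 3))                       # row-stacked BEAM-frame vectors
theta = 0.3                                       # arbitrary rotation about Z
rmat = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                 [np.sin(theta),  np.cos(theta), 0.0],
                 [0.0,            0.0,           1.0]])
# column-vector convention and row-vector convention agree:
assert np.allclose((rmat @ V.T).T, V @ rmat.T)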
-def _beam_to_crystal(vecs, rmat_b=None, rmat_s=None, rmat_c=None): - """ - Helper function to take vectors defined in the BEAM frame through LAB - to either SAMPLE or CRYSTAL - - """ - orig_dims = vecs.ndim - vecs = np.atleast_2d(vecs) - nvecs = len(vecs) - if rmat_s is not None: - rmat_s = np.squeeze(rmat_s) - if rmat_s.ndim == 3: - # !!!: this implies individual sample frames for each vector - assert len(rmat_s) == nvecs, \ - "if specifying an array of rmat_s, dimensions must be " + \ - "(%d, 3, 3), not (%d, %d, %d)" \ - % tuple([nvecs] + list(rmat_s.shape)) - - # take to lab frame (row order) - # !!! rmat_b is defined as taking components from BEAM to LAB - if rmat_b is not None: - vecs = np.dot(vecs, rmat_b.T) - - # to go to CRYSTAL in column vec order (hstacked gvec_l): - # - # gvec_c = np.dot(rmat_c.T, np.dot(rmat_s.T, np.dot(rmat_b, gvec_b))) - # - # rmat_s = np.dot(rchi, rome) - # - # --> in row vec order (vstacked gvec_l, C order): - # - # gvec_l = np.dot(gvec_b, np.dot(rmat_b.T, np.dot(rmat_s, rmat_c))) - if rmat_s is not None: - if rmat_s.ndim > 2: - for i in range(nvecs): - vecs[i] = np.dot(vecs[i], rmat_s[i]) - else: - vecs = np.dot(vecs, rmat_s) - if rmat_c is not None: - return np.dot(vecs, rmat_c) - - return vecs[0] if orig_dims == 1 else vecs - - -def _crystal_to_lab(gvecs, - rmat_s, rmat_c, - bmat=None, vmat_inv=None): - """gvecs is (n, 3), but may also be (3,) - - rmat_s are either (3, 3) or (n, 3, 3) - - if bmat is not None, gvecs are assumed to be hkls - Takes a list of reciprocal lattice vectors components in crystal frame to - the specified detector-relative frame, subject to the conditions: - - 1) the reciprocal lattice vector must be able to satisfy a bragg condition - 2) the associated diffracted beam must intersect the detector plane - - Parameters - ---------- - gvecs : array_like - Concatenated triplets of G-vector components in either the - CRYSTAL FRAME or RECIPROCAL FRAME (see optional kwarg `bmat` below). - The shape when cast as an ndarray is (n, 3), representing n vectors. - rmat_s : array_like - The COB matrix taking components in the SAMPLE FRAME to the LAB FRAME. - This can be either (3, 3) or (n, 3, 3). In the latter case, each of the - n input G-vectors is transformed using the associated entry in - `rmat_s`. - rmat_c : array_like or None - The (3, 3) COB matrix taking components in the CRYSTAL FRAME to the - SAMPLE FRAME. None can be provided, in which case result will be - equivalent to an identity rmat_c, but operations will be skipped. - bmat : array_like, optional - The (3, 3) COB matrix taking components in the - RECIPROCAL LATTICE FRAME to the CRYSTAL FRAME; if supplied, it is - assumed that the input `gvecs` are G-vector components in the - RECIPROCAL LATTICE FRAME (the default is None, which implies components - in the CRYSTAL FRAME) - vmat_inv : array_like, optional - The (3, 3) matrix of inverse stretch tensor components in the - SAMPLE FRAME. The default is None, which implies a strain-free state - (i.e. V = I). - - Returns - ------- - array_like - The (n, 3) array of G-vectors components in the LAB FRAME as specified - by `rmat_s` and `rmat_c`. Note that resulting vector components are - not normalized. - - Raises - ------ - AssertionError - If `rmat_s` has dimension 3, but its first dimension is != n.
- - Notes - ----- - - To go to the LAB FRAME from the CRYSTAL FRAME in column vec order (hstacked - gvec_c): - - gvec_l = np.dot(np.dot(rmat_c.T, np.dot(rmat_s.T, rmat_b)), gvec_b) - - rmat_s = np.dot(rchi, rome) - - --> in row vec order (vstacked gvec_l): - - gvec_l = np.dot(gvec_b, np.dot(rmat_b.T, np.dot(rmat_s, rmat_c))) - - """ - # some preconditions - assert gvecs.ndim <= 2 and gvecs.shape[-1] == 3 - assert rmat_s.ndim <= 3 and rmat_s.shape[-2:] == (3, 3) - assert (rmat_c is None) or (rmat_c.ndim == 2 and rmat_c.shape == (3, 3)) - - # catch 1-d input and grab number of input vectors - nvecs = 1 if gvecs.ndim == 1 else len(gvecs) - nmats = 1 if rmat_s.ndim == 2 else len(rmat_s) - - assert nvecs == 1 or nmats == 1 or nvecs==nmats - - # if bmat is specified, inputs are components in the reciprocal lattice (h, k, l) - gvecs = gvecs if bmat is None else np.dot(gvecs, bmat.T) - - # CRYSTAL FRAME --> SAMPLE FRAME - gvec_s = gvecs if rmat_c is None else np.dot(gvecs, rmat_c.T) - gvec_s = gvec_s if vmat_inv is None else np.dot(gvec_s, vmat_inv.T) - - # SAMPLE FRAME --> LAB FRAME - if nmats > 1: - gvec_l = np.empty((nmats, 3)) - if nvecs == 1: - for i in range(nmats): - gvec_l[i] = np.dot(gvec_s, rmat_s[i].T) - else: - for i in range(nmats): - gvec_l[i] = np.dot(gvec_s[i], rmat_s[i].T) - else: - # single rmat_s - gvec_l = np.dot(gvec_s, rmat_s.T) - - return gvec_l - - -def _rmat_s_helper(ome, chi=None): - """ - simple utility to avoid multiplying by identity for chi=0 when - calculating sample rotation matrices - """ - if chi is None: - return np.array([make_rmat_of_expmap(i*cnst.lab_y) for i in ome]) - else: - return make_sample_rmat(chi, ome) - - -def _z_project(x, y): - return np.cos(x) * np.sin(y) - np.sin(x) * np.cos(y) - - -# diffract has code that assumes the beam direction is [0.0, 0.0, -1.0]. If this -# changes, diffract must be changed as well. -assert np.allclose(cnst.beam_vec, np.r_[0.0, 0.0, -1.0]) -assert np.allclose(cnst.lab_z, np.r_[0.0, 0.0, 1.0]) -def diffract(gvec, beam=None): - """Diffract beam using gvec. - - Parameters - ---------- - gvec : array_like - ([N,] 3) G-vectors to diffract against in the same frame as the beam - - beam : array_like or None - (3,) beam vector to diffract. If None, [0, 0, -1] will be used. - - Returns - ------- - array - ([N,] 3) diffraction vectors. NaNs will be used in results for G-vectors - that can't diffract. - - Notes - ----- - All input vectors are assumed normalized. - """ - if gvec.ndim > 2 or gvec.shape[-1] != 3: - raise ValueError("'gvec' does not match expected dimensions") - - if beam is not None and beam.shape != (3,): - raise ValueError("'beam' does not match expected dimensions") - - ztol = cnst.epsf - onedimensional = gvec.ndim == 1 - if (onedimensional): - gvec = gvec[np.newaxis,...] - - result = np.empty_like(gvec) - if beam is None: - z = cnst.lab_z # [0.0, 0.0, 1.0] - for i, v in enumerate(gvec): - # as beam is [0, 0, -1] so dot(-beam, v) is actually v[2].
- if ztol <= v[2] <= (1.0 - ztol): - # can diffract, optimized diffraction for standard beam - result[i,:] = 2.0*v[2]*v - z - else: - result[i,:] = np.nan - - else: - minus_beam = -beam - for i, v in enumerate(gvec): - if ztol <= np.dot(v, minus_beam) <= (1.0 - ztol): - result[i,:] = make_binary_rmat(v) @ minus_beam - else: - result[i,:] = np.nan - - return np.squeeze(result, axis=0) if onedimensional else result - - -# ============================================================================= -# MAIN FUNCTIONS -# ============================================================================= - - -@xf_api -def angles_to_gvec( - angs, - beam_vec=None, eta_vec=None, - chi=None, rmat_c=None): - - beam_vec = beam_vec if beam_vec is not None else cnst.beam_vec - eta_vec = eta_vec if eta_vec is not None else cnst.eta_vec - - orig_ndim = angs.ndim - angs = np.atleast_2d(angs) - nvecs, dim = angs.shape - - # make vectors in BEAM FRAME - gvec_b = np.vstack([ - [np.cos(0.5*angs[:, 0]) * np.cos(angs[:, 1])], - [np.cos(0.5*angs[:, 0]) * np.sin(angs[:, 1])], - [np.sin(0.5*angs[:, 0])]]).T - - # need rmat_b - rmat_b = make_beam_rmat(beam_vec, eta_vec) - - # handle sample frame(s) - rmat_s = None - if dim > 2: - rmat_s = _rmat_s_helper(angs[:, 2], chi=chi) - result = _beam_to_crystal(gvec_b, rmat_b=rmat_b, - rmat_s=rmat_s, rmat_c=rmat_c) - - return result[0] if orig_ndim == 1 else result - - -@xf_api -def angles_to_dvec( - angs, - beam_vec=None, eta_vec=None, - chi=None, rmat_c=None): - - beam_vec = beam_vec if beam_vec is not None else cnst.beam_vec - eta_vec = eta_vec if eta_vec is not None else cnst.eta_vec - - angs = np.atleast_2d(angs) - nvecs, dim = angs.shape - - # make vectors in beam frame - dvec_b = np.vstack([ - [np.sin(angs[:, 0]) * np.cos(angs[:, 1])], - [np.sin(angs[:, 0]) * np.sin(angs[:, 1])], - [-np.cos(angs[:, 0])]]).T - - # need rmat_b - rmat_b = make_beam_rmat(beam_vec, eta_vec) - - # handle sample frame(s) - rmat_s = None - if dim > 2: - rmat_s = _rmat_s_helper(angs[:, 2], chi=chi) - return _beam_to_crystal(dvec_b, rmat_b=rmat_b, - rmat_s=rmat_s, rmat_c=rmat_c) - - -@xf_api -def gvec_to_xy(gvec_c, - rmat_d, rmat_s, rmat_c, - tvec_d, tvec_s, tvec_c, - beam_vec=None, - vmat_inv=None, - bmat=None): - - beam_vec = beam_vec if beam_vec is not None else cnst.beam_vec - - ztol = cnst.epsf - - # catch 1-d input case and initialize return array with NaNs - onedimensional = gvec_c.ndim == 1 - if onedimensional: - gvec_c = np.atleast_2d(gvec_c) - - retval = np.empty_like(gvec_c) - retval.fill(np.nan) - - nvec_l = rmat_d[:, 2] # detector normal (LAB FRAME) - bhat_l = unit_vector(beam_vec.flatten()) # unit beam vector - - # need CRYSTAL frame origin. If rmat_s is 3-d, this will be a list - # !!!: use _crystal_to_lab helper with trivial rmat_c - P0_l = _crystal_to_lab(tvec_c, rmat_s, None) # CRYSTAL FRAME origin - P3_l = tvec_d # DETECTOR FRAME origin - - # form unit reciprocal lattice vectors in lab frame (w/o translation) - if bmat is None: - # got G-vectors in CRYSTAL FRAME as input - ghat_l = _crystal_to_lab( - unit_vector(gvec_c), rmat_s, rmat_c, - bmat=None, vmat_inv=vmat_inv - ) - else: - # got hkls as input - ghat_l = unit_vector( - _crystal_to_lab( - gvec_c, rmat_s, rmat_c, bmat=bmat, vmat_inv=vmat_inv - ) - ) - # dot with beam vector (upstream, cone axis) - bdot = np.dot(ghat_l, -bhat_l) - - # see who can diffract; initialize output array with NaNs - can_diffract = np.logical_and(bdot >= ztol, bdot <= 1.
- ztol) - if np.any(can_diffract): - # subset of feasible reciprocal lattice vectors - adm_ghat_l = np.atleast_2d(ghat_l[can_diffract, :]) - - # initialize diffracted beam vector array - dvec_l = np.empty_like(adm_ghat_l) - for i, v in enumerate(adm_ghat_l): - dvec_l[i] = np.dot(make_binary_rmat(v), -bhat_l) - - ''' displacement vector calculation below - ''' - - # first check for non-intersections and mitigate divide-by-zero - # ???: better to use np.divide and feed NaNs through? - denom = np.dot(dvec_l, nvec_l) - dzero = abs(denom) < ztol - denom[dzero] = 1. - cant_intersect = denom > 0. # index to dvec_l that can't hit det - - # displacement scaling (along dvec_l) - u = np.dot(P3_l - P0_l, nvec_l) / denom - # filter out non-intersections, fill with NaNs - u[np.logical_or(dzero, cant_intersect)] = np.nan - - u_tiled = np.tile(u, (3,1)).T - # diffracted beam points IN DETECTOR FRAME - P2_l = P0_l + u_tiled * dvec_l - P2_d = np.dot(P2_l - tvec_d, rmat_d) - - # put feasible transformed gvec intersections into return array - retval[can_diffract, :] = P2_d - - return retval[0, :2] if onedimensional else retval[:,:2] - - -@xf_api -def xy_to_gvec(xy_d, - rmat_d, rmat_s, - tvec_d, tvec_s, tvec_c, - rmat_b=None, - distortion=None, - output_ref=False): - - # catch 1-d input and grab number of input vectors - xy_d = np.atleast_2d(xy_d) - npts = len(xy_d) - - # need beam vector - bhat_l = cnst.beam_vec - if rmat_b is not None: - bhat_l = -rmat_b[:, 2] - else: - rmat_b = cnst.identity_3x3 - - # if a distortion function is supplied, apply unwarping - if distortion is not None: - xy_d = distortion.unwarp(xy_d) - - # form in-plane vectors for detector points list in DETECTOR FRAME - P2_d = np.hstack([xy_d, np.zeros((npts, 1))]) - - # define points for ray calculations - P2_l = np.dot(P2_d, rmat_d.T) + tvec_d # inputs in LAB FRAME - P0_l = np.dot(tvec_c, rmat_s.T) + tvec_s # origin of CRYSTAL FRAME - - # diffraction unit vector components in LAB FRAME and BEAM FRAME - dhat_l = unit_vector(P2_l - P0_l) - dhat_b = np.dot(dhat_l, rmat_b) - - # get bragg angle and azimuth of diffracted beam - tth = np.arccos(np.dot(bhat_l.T, dhat_l)).flatten() - eta = np.arctan2(dhat_b[1, :], dhat_b[0, :]).flatten() - - # get G-vectors by Laue condition - ghat_l = unit_vector(dhat_l - bhat_l) - - if output_ref: - # angles for reference frame - dhat_ref_l = unit_vector(P2_l) - dhat_ref_b = np.dot(dhat_ref_l, rmat_b) - tth_ref = np.arccos(np.dot(bhat_l.T, unit_vector(P2_l))).flatten() - eta_ref = np.arctan2(dhat_ref_b[1, :], dhat_ref_b[0, :]).flatten() - return (tth, eta), ghat_l, (tth_ref, eta_ref) - else: - return (tth, eta), ghat_l - - -@xf_api -def solve_omega(gvecs, chi, rmat_c, wavelength, - bmat=None, vmat_inv=None, rmat_b=None): - gvecs = np.atleast_2d(gvecs) - - # sin and cos of the oscillation axis tilt - cchi = np.cos(chi) - schi = np.sin(chi) - - # transform input to sample frame and normalize - gvec_s = _crystal_to_lab(gvecs, cnst.identity_3x3, rmat_c, - bmat=bmat, vmat_inv=vmat_inv) - ghat_s = unit_vector(gvec_s) - one_by_dsp = row_norm(gvec_s) - - # sin of the Bragg angle using wavelength and d-spacings in Bragg's Law - sintht = 0.5 * wavelength * one_by_dsp - - # calculate coefficients for harmonic equation - # !!!: should all be 1-d - if rmat_b is not None: - ''' NON-STANDARD FRAME - ''' - bhat_l = -rmat_b[:, 2] - - # coefficients for harmonic equation - a = ghat_s[2, :]*bhat_l[0] \ - + schi*ghat_s[0, :]*bhat_l[1] \ - - cchi*ghat_s[0, :]*bhat_l[2] - b = ghat_s[0, :]*bhat_l[0] \ -
schi*ghat_s[2, :]*bhat_l[1] \ - + cchi*ghat_s[2, :]*bhat_l[2] - c = -sintht \ - - cchi*ghat_s[1, :]*bhat_l[1] \ - - schi*ghat_s[1, :]*bhat_l[2] - else: - ''' STANDARD FRAME; bhat_l = [0, 0, -1] - ''' - a = cchi*ghat_s[0, :] - b = -cchi*ghat_s[2, :] - c = schi*ghat_s[1, :] - sintht - - # form solution - ab_mag = np.sqrt(a*a + b*b) - phase_ang = np.arctan2(b, a) - rhs = c / ab_mag - rhs[abs(rhs) > 1.] = np.nan - rhs_ang = np.arcsin(rhs) # will give NaN for abs(rhs) > 1. + 0.5*epsf - - # write ome angle output arrays (NaNs persist here) - ome0 = rhs_ang - phase_ang - ome1 = np.pi - rhs_ang - phase_ang - - # both solutions are invalid for the same inputs, so mark using ome0 - valid_solutions = ~np.isnan(ome0) - - # calculate etas - if np.any(valid_solutions): - # initialize eta arrays - eta0 = np.nan * np.ones_like(ome0) - eta1 = np.nan * np.ones_like(ome1) - - vs_idx_array = np.tile(valid_solutions, (1, 2)).flatten() - - num_valid = sum(valid_solutions) - tmp_gvec = np.tile(ghat_s, (1, 2))[:, vs_idx_array] - all_ome = np.hstack([ome0, ome1]) - - # calculate the SAMPLE FRAME COB matrices for each omega - rmat_s = make_sample_rmat(chi, all_ome[vs_idx_array]) - - # return unit G-vectors in LAB FRAME - ghat_l = _crystal_to_lab(tmp_gvec, - rmat_s, None, - bmat=None, vmat_inv=None) - - # if non-standard beam frame is specified, transform ghat_l to - # BEAM FRAME in place - if rmat_b is not None: - ghat_l = np.dot(ghat_l, rmat_b) - - # get etas in BEAM FRAME using arctan2 - all_eta = np.arctan2(ghat_l[:, 1], ghat_l[:, 0]) - - # assign solutions to output array - eta0[valid_solutions] = all_eta[:num_valid] - eta1[valid_solutions] = all_eta[num_valid:] - - # make assoc tth array - tth = 2.*np.arcsin(sintht).flatten() - tth[~valid_solutions] = np.nan - - sol0 = np.vstack([tth.flatten(), eta0.flatten(), ome0.flatten()]).T - sol1 = np.vstack([tth.flatten(), eta1.flatten(), ome1.flatten()]).T - return sol0, sol1 - - else: - # ???: is this what we should do here? - return ome0.flatten(), ome1.flatten() - - -@xf_api -def gvec_to_rays(gvec_c, rmat_s, rmat_c, tvec_s, tvec_c, beam_vec=None): - # The problem is well defined when gvec_c is ([N,] 3,), rmat_s is ([N,] 3, 3) - # and tvec_c is ([M,] 3,). Note that N in rmat_s may be omitted even when - # there is an N in gvec_c. That means rmat_s is broadcast for all gvec_c.
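# NOTE (editor's illustration, assumed shapes only; no hexrd calls): the
# origin broadcasting just described. With M candidate positions and one
# rmat_s per gvec (N of them), origins come out (M, N, 3); a single broadcast
# rmat_s would instead give (M, 3). This mirrors the matmul trick used in the
# validation/origin code that follows.
import numpy as np

N, M = 4, 2
rmat_s = np.stack([np.eye(3)] * N)            # (N, 3, 3), one per gvec
tvec_c = np.ones((M, 3))                      # (M, 3) candidate positions
origins = np.empty((M, N, 3))
np.matmul(rmat_s, tvec_c[:, np.newaxis, :, np.newaxis],
          out=origins[..., np.newaxis])       # batch dims broadcast to (M, N)
origins += np.zeros(3)                        # stand-in for adding tvec_s
assert origins.shape == (M, N, 3)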
- if gvec_c.ndim not in (1, 2) or gvec_c.shape[-1] != 3: - raise ValueError("'gvec_c' does not match expected dimensions") - - if rmat_s.ndim not in (2, 3) or rmat_s.shape[-2:] != (3, 3): - raise ValueError("'rmat_s' does not match expected dimensions") - - if rmat_c.shape != (3, 3): - raise ValueError("'rmat_c' does not match expected dimensions") - - if tvec_s.shape != (3,): - raise ValueError("'tvec_s' does not match expected dimensions") - - if tvec_c.ndim not in (1, 2) or tvec_c.shape[-1] != 3: - raise ValueError("'tvec_c' does not match expected dimensions") - - if beam_vec is not None and beam_vec.shape != (3,): - raise ValueError("'beam_vec' does not match expected dimensions") - - M = None if tvec_c.ndim == 1 else len(tvec_c) - N = None if gvec_c.ndim == 1 else len(gvec_c) - - if rmat_s.ndim == 3 and len(rmat_s) != N: - raise ValueError("'gvec_c' and 'rmat_s' mismatching dimensions") - - bhat_l = unit_vector(beam_vec.flatten()) if beam_vec is not None else cnst.beam_vec - ztol = cnst.epsf - - result_dtype = np.result_type(gvec_c, rmat_s, rmat_c, tvec_s, tvec_c) - if N is None: - # only 1 gvec: one vector and as many origins as voxels - vectors = np.empty((3,), dtype=result_dtype) - if M is None: - origins = np.empty((3,), dtype=result_dtype) - else: - origins = np.empty((M,3), dtype=result_dtype) - else: - # several gvec - vectors = np.empty((N,3), dtype=result_dtype) - if rmat_s.ndim == 2: - # when rmat_s is broadcast, as many origin points as voxels - origins = np.empty((3,) if M is None else (M, 3), dtype=result_dtype) - else: - # for an rmat_s per gvec, as many origin points as voxels x gvec are - # needed - origins = np.empty((N, 3) if M is None else (M, N, 3), - dtype=result_dtype) - - ## compute origins - # origins for a single element would be: - # origin = tvec_s + rmat_s x tvec_c. - if rmat_s.ndim == 2: - # the trick here is that if there is more than one tvec_c, matmul expects - # the different vectors as columns (and will write the result vectors in - # columns). By using the transpose in tvec_c input vectors are arranged - # by columns, and by using transpose in the input the results are written - # as expected (as rows instead of columns) - np.matmul(rmat_s, tvec_c.T, out=origins.T) - else: # multiple rmat_s - if tvec_c.ndim == 1: - np.matmul(rmat_s, tvec_c, out=origins) - else: - # when multiple tvec_c and multiple rmat_s, things have to be arranged - # so that results are ordered in the right way. - np.matmul(rmat_s, tvec_c[:,np.newaxis,:,np.newaxis], - out=origins[..., np.newaxis]) - origins += tvec_s - - ## compute diffractions. - # gvec_c -> gvec_s -> gvec_l. vectors = diffract(gvec_l, beam). - # 1. Put gvecs in LAB frame - if rmat_s.ndim == 2: - # note: rmat_s @ rmat_c is evaluated first, which is the most efficient - # way when there is a single rmat_s (unless N is *very* small). - gvec_l = rmat_s @ rmat_c @ gvec_c.T - gvec_l = gvec_l.T - else: - # In this case, in order for rmat_s to be applied properly, a dimension will - # be added to the result of rmat_c x gvec_c.T. This way, the dimensions - # for that operand will be (N, 3, 1) (column vectors of gvec_s) which - # will "matmul" properly with (N,3,3). So (N,3,3) x (N, 3, 1) will - # result in (N, 3, 1). The last dimension of the result will need to be - # dropped.
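# NOTE (editor's sketch of the trailing-axis trick described above, using
# assumed illustrative values): appending a new axis turns (N, 3) row vectors
# into (N, 3, 1) column vectors so a per-vector (N, 3, 3) stack of rmat_s
# matrices applies one rotation per gvec, as in the line that follows.
import numpy as np

N = 3
rmat_s = np.stack([np.eye(3)] * N)                 # (N, 3, 3)
gvec_s = np.arange(3.0 * N).reshape(N, 3)          # (N, 3) row vectors
gvec_l = rmat_s @ gvec_s[..., np.newaxis]          # (N, 3, 3) @ (N, 3, 1)
gvec_l = np.squeeze(gvec_l, axis=-1)               # drop the helper axis
assert gvec_l.shape == (N, 3)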
- gvec_l = rmat_s @ (rmat_c @ gvec_c.T).T[..., np.newaxis] - gvec_l = np.squeeze(gvec_l, axis=-1) - - # diffract - vectors = diffract(gvec_l, bhat_l) - - return vectors, origins - - -@xf_api -def rays_to_xy_planar(vectors, origins, rmat_d, tvec_d, origin_per_vector=False): - base_origin_dims = 2 if origin_per_vector and vectors.ndim > 1 else 1 - N = None if vectors.ndim <= 1 else len(vectors) - M = None if origins.ndim <= base_origin_dims else len(origins) - expected_vectors_shape = (3,) if N is None else (N,3) - - if origin_per_vector: - if N is None: - expected_origins_shape = (3,) if M is None else (M, 3) - else: - expected_origins_shape = (N, 3) if M is None else (M, N, 3) - else: - expected_origins_shape = (3,) if M is None else (M, 3) - - if vectors.shape != expected_vectors_shape: - raise ValueError("'vectors' does not match expected dimensions") - - if origins.shape != expected_origins_shape: - raise ValueError("'origins' does not match expected dimensions") - - if rmat_d.shape != (3,3): - raise ValueError("'rmat_d' does not match expected dimensions") - - if tvec_d.shape != (3,): - raise ValueError("'tvec_d' does not match expected dimensions") - - result_shape = tuple() if M is None else (M,) - result_shape += (2,) if N is None else (N,2) - result = np.empty_like(vectors, shape=result_shape) - - # offsets are the translations that need to be applied in order to move - # a point in LAB frame into DETECTOR frame once it is already rotated. - # offsets[2] happens to be the D element in the plane formula when taking - # the Z column vector of rmat_d as the plane normal - offsets = -(tvec_d @ rmat_d) - - # In the intersection code, advantage is taken from the IEEE754 divide - # behavior generating NAN for divide by 0. Code is written so that those - # NAN are propagated. - with np.errstate(divide='ignore', invalid='ignore'): - if N is None or not origin_per_vector: - if M is not None: - vect_in_d = vectors@rmat_d.T # vectorized for N - for m_i in range(M): - # pos_in_d will actually be x_base, y_base *and* num, where - # num is the numerator for the ray-plane intersection. x_base - # and y_base will be useful to compute the positions - pos_in_d = origins[m_i]@rmat_d.T + offsets # vector 3 - t = pos_in_d[2]/vect_in_d[...,2] - if t.ndim: - t[t>0.0] = np.nan - else: - t = np.nan if t>0.0 else t - t = np.expand_dims(t, axis=-1) - result[m_i,...] = pos_in_d[0:2] - t*vect_in_d[..., 0:2] - else: - pos_in_d = origins@rmat_d.T + offsets # vector 3 - vect_in_d = vectors@rmat_d.T # vectorized for N - t = pos_in_d[2]/vect_in_d[...,2] - if t.ndim: - t[t>0.0] = np.nan - else: - t = np.nan if t>0.0 else t - t = np.expand_dims(t, axis=-1) - vect_in_d = np.expand_dims(vect_in_d, axis=0) - result[...] = pos_in_d[0:2] - t*vect_in_d[..., 0:2] - else: # origin per vector... and there are several vectors! - all_vect_in_d = vectors@rmat_d.T # vectorized for N - for n_i in range(N): - # In this case it makes more sense to vectorize on M - # pos_in_d will actually be x_base, y_base *and* num, where - # num is the numerator for the ray-plane intersection.
x_base - # and y_base will be useful to compute the positions - vect_in_d = all_vect_in_d if all_vect_in_d.ndim == 1 else all_vect_in_d[n_i] - pos_in_d = origins[...,n_i,:]@rmat_d.T + offsets # vector 3 - t = pos_in_d[...,2]/vect_in_d[2] - if t.ndim: - t[t>0.0] = np.nan - else: - t = np.nan if t>0.0 else t - t = np.expand_dims(t, axis=-1) - vect_in_d = np.expand_dims(vect_in_d, axis=0) - result[..., n_i, :] = pos_in_d[...,0:2] - t*vect_in_d[...,0:2] - - return result - - -# ============================================================================= -# UTILITY FUNCTIONS -# ============================================================================= - -@xf_api -def angular_difference(ang_list0, ang_list1, units=None): - period = cnst.period_for_unit(units) - # take difference as arrays - diffAngles = np.atleast_1d(ang_list0) - np.atleast_1d(ang_list1) - - return abs(np.remainder(diffAngles + 0.5*period, period) - 0.5*period) - - -@xf_api -def map_angle(ang, range=None, units=None): - period = cnst.period_for_unit(units) - - ang = np.atleast_1d(npfloat(ang)) - - # if we have a specified angular range, use it - if range is not None: - angRange = np.atleast_1d(npfloat(range)) - - # divide out multiples of period - ang = ang - npint(ang / period) * period - - lb = angRange.min() - ub = angRange.max() - - if abs(ub - lb) != period: - raise RuntimeError('range is incomplete!') - - lbi = ang < lb - while lbi.sum() > 0: - ang[lbi] = ang[lbi] + period - lbi = ang < lb - ubi = ang > ub - while ubi.sum() > 0: - ang[ubi] = ang[ubi] - period - ubi = ang > ub - retval = ang - # shouldn't all this be equivalent to: - # retval = np.mod(ang - lb, period) + lb ???? - # note the particular case below for range (-0.5*period, +0.5*period) - # where lb would be -0.5*period. - - else: - retval = np.mod(ang + 0.5*period, period) - 0.5*period - return retval - -@xf_api -def row_norm(vec_in): - if len(vec_in.shape) > 2: - raise ValueError( - "incorrect shape: arg must be 1-d or 2-d, yours is %d" - % (len(vec_in.shape)) - ) - return np.sqrt(np.sum(np.asarray(vec_in)**2, axis=-1)) - - -@xf_api -def unit_vector(vec_in): - orig_dims = vec_in.ndim - if vec_in.ndim not in [1,2]: - # Make sure that dimensions are supported - raise ValueError( - "incorrect arg shape; must be 1-d or 2-d, yours is %d-d" - % (vec_in.ndim) - ) - - a = np.atleast_2d(vec_in) - n = a.shape[1] - - # calculate row norms and prevent divide by zero - nrm = np.sqrt(np.sum(a*a, axis=1)) - nrm[nrm <= cnst.epsf] = 1.
- normalized = a/nrm[:,np.newaxis] - - return normalized[0] if orig_dims == 1 else normalized - - -@xf_api -def make_sample_rmat(chi, ome): - # angle chi about LAB X - cchi = np.cos(chi) - schi = np.sin(chi) - rchi = np.array([[1., 0., 0.], - [0., cchi, -schi], - [0., schi, cchi]], dtype = np.double) - - # angle ome about SAMPLE Y ([0., cos(chi), sin(chi)] in LAB) - ome = np.atleast_1d(ome) - if len(ome) > 1: - rmat_s = np.array( - [np.dot(rchi, make_rmat_of_expmap(i*cnst.lab_y)) for i in ome] - ) - else: - come = np.cos(ome[0]) - some = np.sin(ome[0]) - rome = np.array([[come, 0., some], - [0., 1., 0.], - [-some, 0., come]], dtype = np.double) - rmat_s = np.dot(rchi, rome) - return rmat_s - - -@xf_api -def make_rmat_of_expmap(exp_map): - phi = np.sqrt( - exp_map[0]*exp_map[0] - + exp_map[1]*exp_map[1] - + exp_map[2]*exp_map[2] - ) - if phi > cnst.epsf: - wmat = np.array([ - [0., -exp_map[2], exp_map[1]], - [exp_map[2], 0., -exp_map[0]], - [-exp_map[1], exp_map[0], 0.], - ]) - rmat = \ - cnst.identity_3x3 \ - + (np.sin(phi)/phi)*wmat \ - + ((1. - np.cos(phi))/(phi*phi))*np.dot(wmat, wmat) - else: - rmat = cnst.identity_3x3 - return rmat - - -@xf_api -def make_binary_rmat(axis): - if axis.shape != (3,): - raise ValueError('Axis input does not have 3 components') - return 2*np.outer(axis, axis) - cnst.identity_3x3 - - -@xf_api -def make_beam_rmat(bvec_l, evec_l): - # normalize input - bhat_l = unit_vector(bvec_l) - ehat_l = unit_vector(evec_l) - - # find Ye as cross(ehat_l, bhat_l), normalize if kosher - Ye = np.cross(ehat_l, bhat_l) - if np.sqrt(np.sum(Ye*Ye)) < cnst.sqrt_epsf: - raise RuntimeError("bvec_l and evec_l must NOT be collinear!") - Ye = unit_vector(Ye) - - # find Xe as cross(bhat_l, Ye) - Xe = np.cross(bhat_l, Ye) - - return np.vstack([Xe, Ye, -bhat_l]) - - -@xf_api -def angles_in_range(angles, starts, stops, degrees=True): - tau = 360.0 if degrees else 2*np.pi - nw = len(starts) - na = len(angles) - in_range = np.zeros((na), dtype=bool) - for i in range(nw): - amin = starts[i] - amax = stops[i] - for j in range(na): - a = angles[j] - acheck = amin + np.mod(a - amin, tau) - if acheck <= amax: - in_range[j] = True - return in_range - - -@xf_api -def validate_angle_ranges(ang_list, start_angs, stop_angs, ccw=True): - # Prefer ravel over flatten because flatten never skips the copy - ang_list = np.asarray(ang_list).ravel() - startAngs = np.asarray(start_angs).ravel() - stopAngs = np.asarray(stop_angs).ravel() - - n_ranges = len(start_angs) - assert len(stop_angs) == n_ranges, \ - "length of min and max angular limits must match!" - - # to avoid warnings in >=, <= later down, mark nans; - # need these to trick output to False in the case of nan input - nan_mask = np.isnan(ang_list) - - reflInRange = np.zeros(ang_list.shape, dtype=bool) - - # bin length for chunking - binLen = np.pi / 2. - - # in plane vectors defining wedges - x0 = np.vstack([np.cos(startAngs), np.sin(startAngs)]) - x1 = np.vstack([np.cos(stopAngs), np.sin(stopAngs)]) - - # dot products - dp = np.sum(x0 * x1, axis=0) - if np.any(dp >= 1. - cnst.sqrt_epsf) and n_ranges > 1: - # ambiguous case - raise RuntimeError( - "Improper usage; at least one of your ranges " - + "is already 360 degrees!" - ) - elif dp[0] >= 1. - cnst.sqrt_epsf and n_ranges == 1: - # trivial case!
- reflInRange = np.ones(ang_list.shape, dtype=bool) - reflInRange[nan_mask] = False - else: - # solve for arc lengths - # ...note: no zeros should have made it here - a = x0[0, :]*x1[1, :] - x0[1, :]*x1[0, :] - b = x0[0, :]*x1[0, :] + x0[1, :]*x1[1, :] - phi = np.arctan2(b, a) - - arclen = 0.5*np.pi - phi # these are clockwise - cw_phis = arclen < 0 - arclen[cw_phis] = 2*np.pi + arclen[cw_phis] # all positive (CW) now - if not ccw: - arclen = 2*np.pi - arclen - - if sum(arclen) > 2*np.pi: - raise RuntimeWarning( - "Specified angle ranges sum to > 360 degrees" - + ", which is suspect..." - ) - - # check that there are no more thandp = np.zeros(n_ranges) - for i in range(n_ranges): - # number or subranges using 'binLen' - numSubranges = int(np.ceil(arclen[i]/binLen)) - - # check remaider - binrem = np.remainder(arclen[i], binLen) - if binrem == 0: - finalBinLen = binLen - else: - finalBinLen = binrem - - # if clockwise, negate bin length - if not ccw: - binLen = -binLen - finalBinLen = -finalBinLen - - # Create sub ranges on the fly to avoid ambiguity in dot product - # for wedges >= 180 degrees - subRanges = np.array( - [startAngs[i] + binLen*j for j in range(numSubranges)] - + [startAngs[i] + binLen*(numSubranges - 1) + finalBinLen]) - - for k in range(numSubranges): - zStart = _z_project(ang_list, subRanges[k]) - zStop = _z_project(ang_list, subRanges[k + 1]) - if ccw: - zStart[nan_mask] = 999. - zStop[nan_mask] = -999. - reflInRange = \ - reflInRange | np.logical_and(zStart <= 0, zStop >= 0) - else: - zStart[nan_mask] = -999. - zStop[nan_mask] = 999. - reflInRange = \ - reflInRange | np.logical_and(zStart >= 0, zStop <= 0) - return reflInRange - - -@xf_api -def rotate_vecs_about_axis(angle, axis, vecs): - angle = np.atleast_1d(angle) - axis = axis if axis.ndim > 1 else axis[:,np.newaxis] - - assert vecs.shape[0] == 3 and vecs.ndim == 2 - assert angle.shape[0] == 1 or angle.shape[0] == vecs.shape[-1] - assert axis.shape == (3,1) or axis.shape == vecs.shape - - # nvecs = vecs.shape[1] # assume column vecs - - # quaternion components - q0 = np.cos(0.5*angle) - q1 = np.sin(0.5*angle) - qv = np.tile(q1, (3, 1)) * axis - - # component perpendicular to axes (inherits shape of vecs) - vp0 = vecs[0, :] \ - - axis[0, :]*axis[0, :]*vecs[0, :] \ - - axis[0, :]*axis[1, :]*vecs[1, :] \ - - axis[0, :]*axis[2, :]*vecs[2, :] - vp1 = vecs[1, :] \ - - axis[1, :]*axis[1, :]*vecs[1, :] \ - - axis[1, :]*axis[0, :]*vecs[0, :] \ - - axis[1, :]*axis[2, :]*vecs[2, :] - vp2 = vecs[2, :] \ - - axis[2, :]*axis[2, :]*vecs[2, :] \ - - axis[2, :]*axis[0, :]*vecs[0, :] \ - - axis[2, :]*axis[1, :]*vecs[1, :] - - # dot product with components along; cross product with components normal - qdota = \ - (axis[0, :]*vecs[0, :] - + axis[1, :]*vecs[1, :] - + axis[2, :]*vecs[2, :]) \ - * \ - (axis[0, :]*qv[0, :] - + axis[1, :]*qv[1, :] - + axis[2, :]*qv[2, :]) - qcrossn = np.vstack([qv[1, :]*vp2 - qv[2, :]*vp1, - qv[2, :]*vp0 - qv[0, :]*vp2, - qv[0, :]*vp1 - qv[1, :]*vp0]) - - # quaternion formula - v_rot = np.tile(q0*q0 - q1*q1, (3, 1)) * vecs \ - + 2. * np.tile(qdota, (3, 1)) * qv \ - + 2. 
* np.tile(q0, (3, 1)) * qcrossn - return v_rot - - -@xf_api -def quat_product_matrix(q, mult='right'): - if mult == 'right': - qmat = np.array([[ q[0], -q[1], -q[2], -q[3]], - [ q[1], q[0], q[3], -q[2]], - [ q[2], -q[3], q[0], q[1]], - [ q[3], q[2], -q[1], q[0]], - ]) - elif mult == 'left': - qmat = np.array([[ q[0], -q[1], -q[2], -q[3]], - [ q[1], q[0], -q[3], q[2]], - [ q[2], q[3], q[0], -q[1]], - [ q[3], -q[2], q[1], q[0]], - ]) - return qmat - - -@xf_api -def quat_distance(q1, q2, qsym): - # qsym from PlaneData objects are (4, nsym) - # convert symmetries to (4, 4) qprod matrices - nsym = qsym.shape[1] - rsym = np.zeros((nsym, 4, 4)) - for i in range(nsym): - rsym[i, :, :] = quat_product_matrix(qsym[:, i], mult='right') - - # inverse of q1 in matrix form - q1i = quat_product_matrix( - np.r_[1, -1, -1, -1]*np.atleast_1d(q1).flatten(), - mult='right' - ) - - # Do R * Gc, store as vstacked equivalent quaternions (nsym, 4) - q2s = np.dot(rsym, q2) - - # Calculate the class of misorientations for full symmetrically equivalent - # q1 and q2 as: - # - # q2*q1^(-1) - # - # using matrix notation (4, 4) * (4, nsym) - eqv_mis = np.dot(q1i, q2s.T) - - # find the largest scalar component and return arccos - return 2*np.arccos(eqv_mis[0, np.argmax(abs(eqv_mis[0, :]))]) diff --git a/hexrd/transforms/old_xfcapi.py b/hexrd/transforms/old_xfcapi.py index b3afa8432..56308ab47 100644 --- a/hexrd/transforms/old_xfcapi.py +++ b/hexrd/transforms/old_xfcapi.py @@ -480,12 +480,10 @@ def mapAngle(ang, *args, **kwargs): while lbi.sum() > 0: ang[lbi] = ang[lbi] + period lbi = ang < lb - pass ubi = ang > ub while ubi.sum() > 0: ang[ubi] = ang[ubi] - period ubi = ang > ub - pass retval = ang else: retval = np.mod(ang + 0.5*period, period) - 0.5*period diff --git a/hexrd/transforms/xf.py b/hexrd/transforms/xf.py index a8bb4c091..56e593b17 100644 --- a/hexrd/transforms/xf.py +++ b/hexrd/transforms/xf.py @@ -28,33 +28,32 @@ import sys import numpy as np +import numba + # np.seterr(invalid='ignore') import scipy.sparse as sparse from hexrd import matrixutil as mutil -from hexrd.constants import USE_NUMBA -if USE_NUMBA: - import numba # ============================================================================= # Module Data # ============================================================================= -epsf = np.finfo(float).eps # ~2.2e-16 -ten_epsf = 10 * epsf # ~2.2e-15 -sqrt_epsf = np.sqrt(epsf) # ~1.5e-8 +epsf = np.finfo(float).eps # ~2.2e-16 +ten_epsf = 10 * epsf # ~2.2e-15 +sqrt_epsf = np.sqrt(epsf) # ~1.5e-8 -periodDict = {'degrees': 360.0, 'radians': 2*np.pi} -angularUnits = 'radians' # module-level angle units -d2r = np.pi/180.0 +periodDict = {'degrees': 360.0, 'radians': 2 * np.pi} +angularUnits = 'radians' # module-level angle units +d2r = np.pi / 180.0 # basis vectors -I3 = np.eye(3) # (3, 3) identity -Xl = np.array([[1., 0., 0.]], order='C').T # X in the lab frame -Yl = np.array([[0., 1., 0.]], order='C').T # Y in the lab frame -Zl = np.array([[0., 0., 1.]], order='C').T # Z in the lab frame +I3 = np.eye(3) # (3, 3) identity +Xl = np.array([[1.0, 0.0, 0.0]], order='C').T # X in the lab frame +Yl = np.array([[0.0, 1.0, 0.0]], order='C').T # Y in the lab frame +Zl = np.array([[0.0, 0.0, 1.0]], order='C').T # Z in the lab frame zeroVec = np.zeros(3, order='C') @@ -63,7 +62,7 @@ eta_ref = Xl # reference stretch -vInv_ref = np.array([[1., 1., 1., 0., 0., 0.]], order='C').T +vInv_ref = np.array([[1.0, 1.0, 1.0, 0.0, 0.0, 0.0]], order='C').T # 
============================================================================= @@ -94,50 +93,37 @@ def makeGVector(hkl, bMat): return unitVector(np.dot(bMat, hkl)) -if USE_NUMBA: - @numba.njit(nogil=True, cache=True) - def _anglesToGVecHelper(angs, out): - # gVec_e = np.vstack([[np.cos(0.5*angs[:, 0]) * np.cos(angs[:, 1])], - # [np.cos(0.5*angs[:, 0]) * np.sin(angs[:, 1])], - # [np.sin(0.5*angs[:, 0])]]) - n = angs.shape[0] - for i in range(n): - ca0 = np.cos(0.5*angs[i, 0]) - sa0 = np.sin(0.5*angs[i, 0]) - ca1 = np.cos(angs[i, 1]) - sa1 = np.sin(angs[i, 1]) - out[i, 0] = ca0 * ca1 - out[i, 1] = ca0 * sa1 - out[i, 2] = sa0 - - def anglesToGVec(angs, bHat_l, eHat_l, rMat_s=I3, rMat_c=I3): - """ - from 'eta' frame out to lab - (with handy kwargs to go to crystal or sample) - """ - rMat_e = makeEtaFrameRotMat(bHat_l, eHat_l) - gVec_e = np.empty((angs.shape[0], 3)) - _anglesToGVecHelper(angs, gVec_e) - mat = np.dot(rMat_c.T, np.dot(rMat_s.T, rMat_e)) - return np.dot(mat, gVec_e.T) -else: - def anglesToGVec(angs, bHat_l, eHat_l, rMat_s=I3, rMat_c=I3): - """ - from 'eta' frame out to lab - (with handy kwargs to go to crystal or sample) - """ - rMat_e = makeEtaFrameRotMat(bHat_l, eHat_l) - gVec_e = np.vstack([[np.cos(0.5*angs[:, 0]) * np.cos(angs[:, 1])], - [np.cos(0.5*angs[:, 0]) * np.sin(angs[:, 1])], - [np.sin(0.5*angs[:, 0])]]) - mat = np.dot(rMat_c.T, np.dot(rMat_s.T, rMat_e)) - return np.dot(mat, gVec_e) +@numba.njit(nogil=True, cache=True) +def _anglesToGVecHelper(angs, out): + # gVec_e = np.vstack([[np.cos(0.5*angs[:, 0]) * np.cos(angs[:, 1])], + # [np.cos(0.5*angs[:, 0]) * np.sin(angs[:, 1])], + # [np.sin(0.5*angs[:, 0])]]) + n = angs.shape[0] + for i in range(n): + ca0 = np.cos(0.5 * angs[i, 0]) + sa0 = np.sin(0.5 * angs[i, 0]) + ca1 = np.cos(angs[i, 1]) + sa1 = np.sin(angs[i, 1]) + out[i, 0] = ca0 * ca1 + out[i, 1] = ca0 * sa1 + out[i, 2] = sa0 + + +def anglesToGVec(angs, bHat_l, eHat_l, rMat_s=I3, rMat_c=I3): + """ + from 'eta' frame out to lab + (with handy kwargs to go to crystal or sample) + """ + rMat_e = makeEtaFrameRotMat(bHat_l, eHat_l) + gVec_e = np.empty((angs.shape[0], 3)) + _anglesToGVecHelper(angs, gVec_e) + mat = np.dot(rMat_c.T, np.dot(rMat_s.T, rMat_e)) + return np.dot(mat, gVec_e.T) -def gvecToDetectorXY(gVec_c, - rMat_d, rMat_s, rMat_c, - tVec_d, tVec_s, tVec_c, - beamVec=bVec_ref): +def gvecToDetectorXY( + gVec_c, rMat_d, rMat_s, rMat_c, tVec_d, tVec_s, tVec_c, beamVec=bVec_ref +): """ Takes a list of unit reciprocal lattice vectors in crystal frame to the specified detector-relative frame. @@ -176,10 +162,10 @@ def gvecToDetectorXY(gVec_c, """ ztol = epsf - nVec_l = np.dot(rMat_d, Zl) # detector plane normal + nVec_l = np.dot(rMat_d, Zl) # detector plane normal bHat_l = unitVector(beamVec.reshape(3, 1)) # make sure beam vector is unit - P0_l = tVec_s + np.dot(rMat_s, tVec_c) # origin of CRYSTAL FRAME - P3_l = tVec_d # origin of DETECTOR FRAME + P0_l = tVec_s + np.dot(rMat_s, tVec_c) # origin of CRYSTAL FRAME + P3_l = tVec_d # origin of DETECTOR FRAME # form unit reciprocal lattice vectors in lab frame (w/o translation) gVec_l = np.dot(rMat_s, np.dot(rMat_c, unitVector(gVec_c))) @@ -189,7 +175,7 @@ def gvecToDetectorXY(gVec_c, # see who can diffract; initialize output array with NaNs canDiffract = np.atleast_1d( - np.logical_and(bDot >= ztol, bDot <= 1. 
- ztol) + np.logical_and(bDot >= ztol, bDot <= 1.0 - ztol) ) npts = sum(canDiffract) retval = np.nan * np.ones_like(gVec_l) @@ -203,7 +189,6 @@ def gvecToDetectorXY(gVec_c, dVec_l[:, ipt] = np.dot( makeBinaryRotMat(adm_gVec_l[:, ipt]), -bHat_l ).squeeze() - pass # ############################################################### # displacement vector calculation # first check for non-intersections denom = np.dot(nVec_l.T, dVec_l).flatten() dzero = abs(denom) < ztol - denom[dzero] = 1. # mitigate divide-by-zero - cantIntersect = denom > 0. # index to dVec_l that can't hit det + denom[dzero] = 1.0 # mitigate divide-by-zero + cantIntersect = denom > 0.0 # index to dVec_l that can't hit det # displacement scaling (along dVec_l) u = np.dot(nVec_l.T, P3_l - P0_l).flatten() / denom @@ -226,16 +211,21 @@ def gvecToDetectorXY(gVec_c, # put feasible transformed gVecs into return array retval[:, canDiffract] = P2_d - pass return retval[:2, :].T -def detectorXYToGvec(xy_det, - rMat_d, rMat_s, - tVec_d, tVec_s, tVec_c, - distortion=None, - beamVec=bVec_ref, etaVec=eta_ref, - output_ref=False): +def detectorXYToGvec( + xy_det, + rMat_d, + rMat_s, + tVec_d, + tVec_s, + tVec_c, + distortion=None, + beamVec=bVec_ref, + etaVec=eta_ref, + output_ref=False, +): """ Takes a list of cartesian (x, y) pairs in the detector coordinates and calculates the associated reciprocal lattice (G) vectors and @@ -295,7 +285,7 @@ def detectorXYToGvec(xy_det, # in LAB FRAME P2_l = np.dot(rMat_d, P2_d) + tVec_d - P0_l = tVec_s + np.dot(rMat_s, tVec_c) # origin of CRYSTAL FRAME + P0_l = tVec_s + np.dot(rMat_s, tVec_c) # origin of CRYSTAL FRAME # diffraction unit vector components in LAB FRAME dHat_l = unitVector(P2_l - P0_l) @@ -304,8 +294,9 @@ def detectorXYToGvec(xy_det, # generate output # DEBUGGING - assert abs(np.dot(bHat_l.T, eHat_l)) < 1. - sqrt_epsf, \ "eta ref and beam cannot be parallel!" + assert ( + abs(np.dot(bHat_l.T, eHat_l)) < 1.0 - sqrt_epsf + ), "eta ref and beam cannot be parallel!" rMat_e = makeEtaFrameRotMat(bHat_l, eHat_l) dHat_e = np.dot(rMat_e.T, dHat_l) @@ -330,8 +321,16 @@ def detectorXYToGvec(xy_det, return (tTh, eta), gVec_l -def oscillAnglesOfHKLs(hkls, chi, rMat_c, bMat, wavelength, - vInv=vInv_ref, beamVec=bVec_ref, etaVec=eta_ref): +def oscillAnglesOfHKLs( + hkls, + chi, + rMat_c, + bMat, + wavelength, + vInv=vInv_ref, + beamVec=bVec_ref, + etaVec=eta_ref, +): """ @@ -428,20 +427,30 @@ def oscillAnglesOfHKLs(hkls, chi, rMat_c, bMat, wavelength, schi = np.sin(chi) # coefficients for harmonic equation - a = gHat_s[2, :]*bHat_l[0] \ + schi*gHat_s[0, :]*bHat_l[1] - cchi*gHat_s[0, :]*bHat_l[2] - b = gHat_s[0, :]*bHat_l[0] \ - schi*gHat_s[2, :]*bHat_l[1] + cchi*gHat_s[2, :]*bHat_l[2] - c = -sintht - cchi*gHat_s[1, :]*bHat_l[1] - schi*gHat_s[1, :]*bHat_l[2] + a = ( + gHat_s[2, :] * bHat_l[0] + + schi * gHat_s[0, :] * bHat_l[1] + - cchi * gHat_s[0, :] * bHat_l[2] + ) + b = ( + gHat_s[0, :] * bHat_l[0] + - schi * gHat_s[2, :] * bHat_l[1] + + cchi * gHat_s[2, :] * bHat_l[2] + ) + c = ( + -sintht + - cchi * gHat_s[1, :] * bHat_l[1] + - schi * gHat_s[1, :] * bHat_l[2] + ) # should all be 1-d: a = a.flatten(); b = b.flatten(); c = c.flatten() # form solution - abMag = np.sqrt(a*a + b*b) + abMag = np.sqrt(a * a + b * b) assert np.all(abMag > 0), "Beam vector specification is infeasible!" phaseAng = np.arctan2(b, a) rhs = c / abMag - rhs[abs(rhs) > 1.] = np.nan + rhs[abs(rhs) > 1.0] = np.nan rhsAng = np.arcsin(rhs) # will give NaN for abs(rhs) > 1.
+ 0.5*epsf # write ome angle output arrays (NaNs persist here) @@ -451,11 +460,12 @@ def oscillAnglesOfHKLs(hkls, chi, rMat_c, bMat, wavelength, goodOnes_s = -np.isnan(ome0) # DEBUGGING - assert np.all(np.isnan(ome0) == np.isnan(ome1)), \ - "infeasible hkls do not match for ome0, ome1!" + assert np.all( + np.isnan(ome0) == np.isnan(ome1) + ), "infeasible hkls do not match for ome0, ome1!" # do etas -- ONLY COMPUTE IN CASE CONSISTENT REFERENCE COORDINATES - if abs(np.dot(bHat_l.T, eHat_l)) < 1. - sqrt_epsf and np.any(goodOnes_s): + if abs(np.dot(bHat_l.T, eHat_l)) < 1.0 - sqrt_epsf and np.any(goodOnes_s): eta0 = np.nan * np.ones_like(ome0) eta1 = np.nan * np.ones_like(ome1) @@ -473,36 +483,42 @@ def oscillAnglesOfHKLs(hkls, chi, rMat_c, bMat, wavelength, for i in range(numGood): rMat_s = makeOscillRotMat([chi, allome[goodOnes][i]]) gVec_e = np.dot( - rMat_e.T, np.dot( - rMat_s, np.dot( - rMat_c, tmp_gvec[:, i].reshape(3, 1) - ) - ) + rMat_e.T, + np.dot(rMat_s, np.dot(rMat_c, tmp_gvec[:, i].reshape(3, 1))), ) tmp_eta[i] = np.arctan2(gVec_e[1], gVec_e[0]) - pass eta0[goodOnes_s] = tmp_eta[:numGood_s] eta1[goodOnes_s] = tmp_eta[numGood_s:] # make assoc tTh array - tTh = 2.*np.arcsin(sintht).flatten() + tTh = 2.0 * np.arcsin(sintht).flatten() tTh0 = tTh tTh0[-goodOnes_s] = np.nan - retval = (np.vstack([tTh0.flatten(), eta0.flatten(), ome0.flatten()]), - np.vstack([tTh0.flatten(), eta1.flatten(), ome1.flatten()]),) + retval = ( + np.vstack([tTh0.flatten(), eta0.flatten(), ome0.flatten()]), + np.vstack([tTh0.flatten(), eta1.flatten(), ome1.flatten()]), + ) else: retval = (ome0.flatten(), ome1.flatten()) - pass return retval -def polarRebin(thisFrame, - npdiv=2, mmPerPixel=(0.2, 0.2), convertToTTh=False, - rMat_d=I3, tVec_d=np.r_[0., 0., -1000.], - beamVec=bVec_ref, etaVec=eta_ref, - rhoRange=np.r_[20, 200], numRho=1000, - etaRange=(d2r*np.r_[-5, 355]), - numEta=36, verbose=True, log=None): +def polarRebin( + thisFrame, + npdiv=2, + mmPerPixel=(0.2, 0.2), + convertToTTh=False, + rMat_d=I3, + tVec_d=np.r_[0.0, 0.0, -1000.0], + beamVec=bVec_ref, + etaVec=eta_ref, + rhoRange=np.r_[20, 200], + numRho=1000, + etaRange=(d2r * np.r_[-5, 355]), + numEta=36, + verbose=True, + log=None, +): """ Performs polar rebinning of an input image. 
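The "form solution" step in oscillAnglesOfHKLs above solves the harmonic equation a*sin(ome) + b*cos(ome) = c by rewriting the left-hand side as sqrt(a^2 + b^2)*sin(ome + phi) with phi = arctan2(b, a). A minimal standalone sketch of that root-finding step (the coefficients here are made up; in the source they come from the diffraction condition for each G-vector, and the exact bookkeeping of the two root families is elided from the hunk above):

import numpy as np

a, b, c = 0.3, 0.8, 0.5           # hypothetical coefficients
abMag = np.sqrt(a * a + b * b)    # amplitude of a*sin + b*cos
phaseAng = np.arctan2(b, a)       # phase offset phi
rhs = c / abMag                   # infeasible (NaN in the source) when |rhs| > 1

ome0 = np.arcsin(rhs) - phaseAng           # first family of roots
ome1 = np.pi - np.arcsin(rhs) - phaseAng   # second family

for ome in (ome0, ome1):          # both satisfy the original equation
    assert np.isclose(a * np.sin(ome) + b * np.cos(ome), c)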
@@ -577,7 +593,9 @@ def polarRebin(thisFrame, startRho = rhoRange[0] stopRho = rhoRange[1] - subPixArea = 1/float(npdiv)**2 # areal rescaling for subpixel intensities + subPixArea = ( + 1 / float(npdiv) ** 2 + ) # areal rescaling for subpixel intensities # MASTER COORDINATES # - in pixel indices, UPPER LEFT PIXEL is [0, 0] --> (row, col) @@ -589,17 +607,23 @@ def polarRebin(thisFrame, # need rhos (or tThs) and etas) if convertToTTh: - dAngs = detectorXYToGvec(np.vstack([x, y]).T, - rMat_d, I3, - tVec_d, zeroVec, zeroVec, - beamVec=beamVec, etaVec=etaVec) - rho = dAngs[0][0] # this is tTh now + dAngs = detectorXYToGvec( + np.vstack([x, y]).T, + rMat_d, + I3, + tVec_d, + zeroVec, + zeroVec, + beamVec=beamVec, + etaVec=etaVec, + ) + rho = dAngs[0][0] # this is tTh now eta = dAngs[0][1] else: # in here, we are vanilla cartesian - rho = np.sqrt(x*x + y*y) + rho = np.sqrt(x * x + y * y) eta = np.arctan2(y, x) - eta = mapAngle(eta, [startEta, 2*np.pi + startEta], units='radians') + eta = mapAngle(eta, [startEta, 2 * np.pi + startEta], units='radians') # MAKE POLAR BIN CENTER ARRAY deltaEta = (stopEta - startEta) / float(numEta) @@ -621,23 +645,21 @@ def polarRebin(thisFrame, log.write(msg) else: print(msg) - pass - rhoI = startRho - 10*deltaRho - rhoF = stopRho + 10*deltaRho + rhoI = startRho - 10 * deltaRho + rhoF = stopRho + 10 * deltaRho inAnnulus = np.where((rho >= rhoI) & (rho <= rhoF))[0] for i in range(numEta): if verbose: - msg = "INFO: Processing sector %d of %d\n" % (i+1, numEta) + msg = "INFO: Processing sector %d of %d\n" % (i + 1, numEta) if log: log.write(msg) else: print(msg) - pass # import pdb;pdb.set_trace() - etaI1 = rowEta[i] - 10.5*deltaEta - etaF1 = rowEta[i] + 10.5*deltaEta + etaI1 = rowEta[i] - 10.5 * deltaEta + etaF1 = rowEta[i] + 10.5 * deltaEta tmpEta = eta[inAnnulus] inSector = np.where((tmpEta >= etaI1) & (tmpEta <= etaF1))[0] @@ -659,32 +681,42 @@ def polarRebin(thisFrame, intY = np.tile(intY.flatten(), (nptsIn, 1)).T.flatten() # expand coords using pixel subdivision - tmpX = np.tile(tmpX, (npdiv**2, 1)).flatten() \ - + (intX - 0.5)*mmPerPixel[0] - tmpY = np.tile(tmpY, (npdiv**2, 1)).flatten() \ - + (intY - 0.5)*mmPerPixel[1] + tmpX = ( + np.tile(tmpX, (npdiv**2, 1)).flatten() + + (intX - 0.5) * mmPerPixel[0] + ) + tmpY = ( + np.tile(tmpY, (npdiv**2, 1)).flatten() + + (intY - 0.5) * mmPerPixel[1] + ) tmpI = np.tile(tmpI, (npdiv**2, 1)).flatten() / subPixArea if convertToTTh: - dAngs = detectorXYToGvec(np.vstack([tmpX, tmpY]).T, - rMat_d, I3, - tVec_d, zeroVec, zeroVec, - beamVec=beamVec, etaVec=etaVec) - tmpRho = dAngs[0][0] # this is tTh now + dAngs = detectorXYToGvec( + np.vstack([tmpX, tmpY]).T, + rMat_d, + I3, + tVec_d, + zeroVec, + zeroVec, + beamVec=beamVec, + etaVec=etaVec, + ) + tmpRho = dAngs[0][0] # this is tTh now tmpEta = dAngs[0][1] else: - tmpRho = np.sqrt(tmpX*tmpX + tmpY*tmpY) + tmpRho = np.sqrt(tmpX * tmpX + tmpY * tmpY) tmpEta = np.arctan2(tmpY, tmpX) tmpEta = mapAngle( - tmpEta, [startEta, 2*np.pi + startEta], - units='radians' + tmpEta, [startEta, 2 * np.pi + startEta], units='radians' ) - etaI2 = rowEta[i] - 0.5*deltaEta - etaF2 = rowEta[i] + 0.5*deltaEta + etaI2 = rowEta[i] - 0.5 * deltaEta + etaF2 = rowEta[i] + 0.5 * deltaEta - inSector2 = ((tmpRho >= startRho) & (tmpRho <= stopRho)) \ - & ((tmpEta >= etaI2) & (tmpEta <= etaF2)) + inSector2 = ((tmpRho >= startRho) & (tmpRho <= stopRho)) & ( + (tmpEta >= etaI2) & (tmpEta <= etaF2) + ) tmpRho = tmpRho[inSector2] tmpI = tmpI[inSector2] @@ -692,14 +724,14 @@ def polarRebin(thisFrame, binId = 
np.floor((tmpRho - startRho) / deltaRho) nSubpixelsIn = len(binId) - if (nSubpixelsIn > 0): + if nSubpixelsIn > 0: tmpI = sparse.csc_matrix( (tmpI, (binId, np.arange(nSubpixelsIn))), - shape=(numRho, nSubpixelsIn) + shape=(numRho, nSubpixelsIn), ) binId = sparse.csc_matrix( (np.ones(nSubpixelsIn), (binId, np.arange(nSubpixelsIn))), - shape=(numRho, nSubpixelsIn) + shape=(numRho, nSubpixelsIn), ) # Normalized contribution to the ith sector's radial bins @@ -707,8 +739,9 @@ def polarRebin(thisFrame, whereNZ = np.asarray( np.not_equal(polImg['intensity'][i, :], binIdSum) ) - polImg['intensity'][i, whereNZ] = \ + polImg['intensity'][i, whereNZ] = ( np.asarray(tmpI.sum(1))[whereNZ].flatten() / binIdSum[whereNZ] + ) return polImg @@ -731,8 +764,8 @@ def arccosSafe(temp): print("attempt to take arccos of %s" % temp, file=sys.stderr) raise RuntimeError("unrecoverable error") - gte1 = temp >= 1. - lte1 = temp <= -1. + gte1 = temp >= 1.0 + lte1 = temp <= -1.0 temp[gte1] = 1 temp[lte1] = -1 @@ -766,7 +799,7 @@ def angularDifference(angList0, angList1, units=angularUnits): period = periodDict[units] # module-level # take difference as arrays diffAngles = np.atleast_1d(angList0) - np.atleast_1d(angList1) - return abs(np.remainder(diffAngles + 0.5*period, period) - 0.5*period) + return abs(np.remainder(diffAngles + 0.5 * period, period) - 0.5 * period) def mapAngle(ang, *args, **kwargs): @@ -783,14 +816,16 @@ def mapAngle(ang, *args, **kwargs): if kwargKeys[iArg] == 'units': units = kwargs[kwargKeys[iArg]] else: - raise RuntimeError("Unknown keyword argument: " - + str(kwargKeys[iArg])) + raise RuntimeError( + "Unknown keyword argument: " + str(kwargKeys[iArg]) + ) try: period = periodDict[units.lower()] - except(KeyError): - raise RuntimeError("unknown angular units: " - + str(kwargs[kwargKeys[iArg]])) + except KeyError: + raise RuntimeError( + "unknown angular units: " + str(kwargs[kwargKeys[iArg]]) + ) ang = np.asarray(ang, dtype=float) @@ -811,15 +846,13 @@ def mapAngle(ang, *args, **kwargs): while lbi.sum() > 0: ang[lbi] = ang[lbi] + period lbi = ang < lb - pass ubi = ang > ub while ubi.sum() > 0: ang[ubi] = ang[ubi] - period ubi = ang > ub - pass retval = ang else: - retval = np.mod(ang + 0.5*period, period) - 0.5*period + retval = np.mod(ang + 0.5 * period, period) - 0.5 * period return retval @@ -880,9 +913,11 @@ def columnNorm(a): normalize array of column vectors (hstacked, axis = 0) """ if len(a.shape) > 2: - raise RuntimeError("incorrect shape: arg must be 1-d or 2-d, " - + "yours is %d" % (len(a.shape))) - cnrma = np.sqrt(sum(np.asarray(a)**2, 0)) + raise RuntimeError( + "incorrect shape: arg must be 1-d or 2-d, " + + "yours is %d" % (len(a.shape)) + ) + cnrma = np.sqrt(sum(np.asarray(a) ** 2, 0)) return cnrma @@ -891,80 +926,63 @@ def rowNorm(a): normalize array of row vectors (vstacked, axis = 1) """ if len(a.shape) > 2: - raise RuntimeError("incorrect shape: arg must be 1-d or 2-d, " - + "yours is %d" % (len(a.shape))) - cnrma = np.sqrt(sum(np.asarray(a)**2, 1)) + raise RuntimeError( + "incorrect shape: arg must be 1-d or 2-d, " + + "yours is %d" % (len(a.shape)) + ) + cnrma = np.sqrt(sum(np.asarray(a) ** 2, 1)) return cnrma -if USE_NUMBA: - @numba.njit(nogil=True, cache=True) - def _unitVectorSingle(a, b): - n = a.shape[0] +@numba.njit(nogil=True, cache=True) +def _unitVectorSingle(a, b): + n = a.shape[0] + nrm = 0.0 + for i in range(n): + nrm += a[i] * a[i] + nrm = np.sqrt(nrm) + # prevent divide by zero + if nrm > epsf: + for i in range(n): + b[i] = a[i] / nrm + else: + for i in 
range(n): + b[i] = a[i] + + +@numba.njit(nogil=True, cache=True) +def _unitVectorMulti(a, b): + n = a.shape[0] + m = a.shape[1] + for j in range(m): nrm = 0.0 for i in range(n): - nrm += a[i]*a[i] + nrm += a[i, j] * a[i, j] nrm = np.sqrt(nrm) # prevent divide by zero if nrm > epsf: for i in range(n): - b[i] = a[i] / nrm + b[i, j] = a[i, j] / nrm else: for i in range(n): - b[i] = a[i] - - @numba.njit(nogil=True, cache=True) - def _unitVectorMulti(a, b): - n = a.shape[0] - m = a.shape[1] - for j in range(m): - nrm = 0.0 - for i in range(n): - nrm += a[i, j]*a[i, j] - nrm = np.sqrt(nrm) - # prevent divide by zero - if nrm > epsf: - for i in range(n): - b[i, j] = a[i, j] / nrm - else: - for i in range(n): - b[i, j] = a[i, j] - - def unitVector(a): - """ - normalize array of column vectors (hstacked, axis = 0) - """ - result = np.empty_like(a) - if a.ndim == 1: - _unitVectorSingle(a, result) - elif a.ndim == 2: - _unitVectorMulti(a, result) - else: - raise ValueError("incorrect arg shape; must be 1-d or 2-d, " - + "yours is %d-d" % (a.ndim)) - return result - -else: # not USE_NUMBA - def unitVector(a): - """ - normalize array of column vectors (hstacked, axis = 0) - """ - assert a.ndim in [1, 2], \ - "incorrect arg shape; must be 1-d or 2-d, yours is %d-d" \ - % (a.ndim) + b[i, j] = a[i, j] - m = a.shape[0] - n = 1 - nrm = np.tile(np.sqrt(sum(np.asarray(a)**2, 0)), (m, n)) - - # prevent divide by zero - zchk = nrm <= epsf - nrm[zchk] = 1. - - nrma = a/nrm - - return nrma +def unitVector(a): + """ + normalize array of column vectors (hstacked, axis = 0) + """ + result = np.empty_like(a) + if a.ndim == 1: + _unitVectorSingle(a, result) + elif a.ndim == 2: + _unitVectorMulti(a, result) + else: + raise ValueError( + "incorrect arg shape; must be 1-d or 2-d, " + + "yours is %d-d" % (a.ndim) + ) + return result def makeDetectorRotMat(tiltAngles): @@ -984,19 +1002,13 @@ def makeDetectorRotMat(tiltAngles): sin_gZ = np.sin(tiltAngles[2]) rotXl = np.array( - [[1., 0., 0.], - [0., cos_gX, -sin_gX], - [0., sin_gX, cos_gX]] + [[1.0, 0.0, 0.0], [0.0, cos_gX, -sin_gX], [0.0, sin_gX, cos_gX]] ) rotYl = np.array( - [[cos_gY, 0., sin_gY], - [0., 1., 0.], - [-sin_gY, 0., cos_gY]] + [[cos_gY, 0.0, sin_gY], [0.0, 1.0, 0.0], [-sin_gY, 0.0, cos_gY]] ) rotZl = np.array( - [[cos_gZ, -sin_gZ, 0.], - [sin_gZ, cos_gZ, 0.], - [0., 0., 1.]] + [[cos_gZ, -sin_gZ, 0.0], [sin_gZ, cos_gZ, 0.0], [0.0, 0.0, 1.0]] ) return np.dot(rotZl, np.dot(rotYl, rotXl)) @@ -1011,17 +1023,9 @@ def makeOscillRotMat(oscillAngles): come = np.cos(oscillAngles[1]) some = np.sin(oscillAngles[1]) - rchi = np.array( - [[1., 0., 0.], - [0., cchi, -schi], - [0., schi, cchi]] - ) + rchi = np.array([[1.0, 0.0, 0.0], [0.0, cchi, -schi], [0.0, schi, cchi]]) - rome = np.array( - [[come, 0., some], - [0., 1., 0.], - [-some, 0., come]] - ) + rome = np.array([[come, 0.0, some], [0.0, 1.0, 0.0], [-some, 0.0, come]]) return np.dot(rchi, rome) @@ -1045,13 +1049,18 @@ def makeRotMatOfExpMap(expMap): phi = np.norm(expMap) if phi > epsf: wMat = np.array( - [[0., -expMap[2], expMap[1]], - [expMap[2], 0., -expMap[0]], - [-expMap[1], expMap[0], 0.]]) + [ + [0.0, -expMap[2], expMap[1]], + [expMap[2], 0.0, -expMap[0]], + [-expMap[1], expMap[0], 0.0], + ] + ) - rMat = I3 \ - + (np.sin(phi) / phi) * wMat \ - + ((1. 
- np.cos(phi)) / (phi*phi)) * np.dot(wMat, wMat) + rMat = ( + I3 + + (np.sin(phi) / phi) * wMat + + ((1.0 - np.cos(phi)) / (phi * phi)) * np.dot(wMat, wMat) + ) else: rMat = I3 @@ -1059,74 +1068,52 @@ def makeRotMatOfExpMap(expMap): def makeBinaryRotMat(axis): - """ - """ + """ """ n = np.asarray(axis).flatten() assert len(n) == 3, 'Axis input does not have 3 components' - return 2*np.dot(n.reshape(3, 1), n.reshape(1, 3)) - I3 - - -if USE_NUMBA: - @numba.njit(nogil=True, cache=True) - def _makeEtaFrameRotMat(bHat_l, eHat_l, out): - # bHat_l and eHat_l CANNOT have 0 magnitude! - # must catch this case as well as colinear bHat_l/eHat_l elsewhere... - bHat_mag = np.sqrt(bHat_l[0]**2 + bHat_l[1]**2 + bHat_l[2]**2) + return 2 * np.dot(n.reshape(3, 1), n.reshape(1, 3)) - I3 - # assign Ze as -bHat_l - for i in range(3): - out[i, 2] = -bHat_l[i] / bHat_mag - # find Ye as Ze ^ eHat_l - Ye0 = out[1, 2]*eHat_l[2] - eHat_l[1]*out[2, 2] - Ye1 = out[2, 2]*eHat_l[0] - eHat_l[2]*out[0, 2] - Ye2 = out[0, 2]*eHat_l[1] - eHat_l[0]*out[1, 2] +@numba.njit(nogil=True, cache=True) +def _makeEtaFrameRotMat(bHat_l, eHat_l, out): + # bHat_l and eHat_l CANNOT have 0 magnitude! + # must catch this case as well as colinear bHat_l/eHat_l elsewhere... + bHat_mag = np.sqrt(bHat_l[0] ** 2 + bHat_l[1] ** 2 + bHat_l[2] ** 2) - Ye_mag = np.sqrt(Ye0**2 + Ye1**2 + Ye2**2) + # assign Ze as -bHat_l + for i in range(3): + out[i, 2] = -bHat_l[i] / bHat_mag - out[0, 1] = Ye0 / Ye_mag - out[1, 1] = Ye1 / Ye_mag - out[2, 1] = Ye2 / Ye_mag + # find Ye as Ze ^ eHat_l + Ye0 = out[1, 2] * eHat_l[2] - eHat_l[1] * out[2, 2] + Ye1 = out[2, 2] * eHat_l[0] - eHat_l[2] * out[0, 2] + Ye2 = out[0, 2] * eHat_l[1] - eHat_l[0] * out[1, 2] - # find Xe as Ye ^ Ze - out[0, 0] = out[1, 1]*out[2, 2] - out[1, 2]*out[2, 1] - out[1, 0] = out[2, 1]*out[0, 2] - out[2, 2]*out[0, 1] - out[2, 0] = out[0, 1]*out[1, 2] - out[0, 2]*out[1, 1] + Ye_mag = np.sqrt(Ye0**2 + Ye1**2 + Ye2**2) - def makeEtaFrameRotMat(bHat_l, eHat_l): - """ - make eta basis COB matrix with beam antiparallel with Z + out[0, 1] = Ye0 / Ye_mag + out[1, 1] = Ye1 / Ye_mag + out[2, 1] = Ye2 / Ye_mag - takes components from ETA frame to LAB + # find Xe as Ye ^ Ze + out[0, 0] = out[1, 1] * out[2, 2] - out[1, 2] * out[2, 1] + out[1, 0] = out[2, 1] * out[0, 2] - out[2, 2] * out[0, 1] + out[2, 0] = out[0, 1] * out[1, 2] - out[0, 2] * out[1, 1] - **NO EXCEPTION HANDLING FOR COLINEAR ARGS IN NUMBA VERSION! - ...put checks for non-zero magnitudes and non-colinearity in wrapper? - """ - result = np.empty((3, 3)) - _makeEtaFrameRotMat(bHat_l.reshape(3), eHat_l.reshape(3), result) - return result - -else: # not USE_NUMBA - def makeEtaFrameRotMat(bHat_l, eHat_l): - """ - make eta basis COB matrix with beam antiparallel with Z +def makeEtaFrameRotMat(bHat_l, eHat_l): + """ + make eta basis COB matrix with beam antiparallel with Z - takes components from ETA frame to LAB - """ - # normalize input - bHat_l = unitVector(bHat_l.reshape(3, 1)) - eHat_l = unitVector(eHat_l.reshape(3, 1)) + takes components from ETA frame to LAB - # find Ye as cross(eHat_l, bHat_l), normalize if kosher - Ye = np.cross(eHat_l.flatten(), bHat_l.flatten()) - if np.sqrt(np.sum(Ye*Ye)) < 1e-8: - raise RuntimeError("bHat_l and eHat_l must NOT be colinear!") - Ye = unitVector(Ye.reshape(3, 1)) + **NO EXCEPTION HANDLING FOR COLINEAR ARGS IN NUMBA VERSION! 
- # find Xe as cross(bHat_l, Ye) - Xe = np.cross(bHat_l.flatten(), Ye.flatten()).reshape(3, 1) - return np.hstack([Xe, Ye, -bHat_l]) + ...put checks for non-zero magnitudes and non-colinearity in wrapper? + """ + result = np.empty((3, 3)) + _makeEtaFrameRotMat(bHat_l.reshape(3), eHat_l.reshape(3), result) + return result def angles_in_range(angles, starts, stops, degrees=True): @@ -1138,8 +1125,8 @@ OPTIONAL ARGS: *degrees* - [True] angles & ranges in degrees (or radians) -""" - TAU = 360.0 if degrees else 2*np.pi + """ + TAU = 360.0 if degrees else 2 * np.pi nw = len(starts) na = len(angles) in_range = np.zeros((na), dtype=bool) @@ -1164,13 +1151,14 @@ def validateAngleRanges(angList, startAngs, stopAngs, ccw=True): the same; we treat them as implying 2*pi having been mapped """ # Prefer ravel over flatten because flatten never skips the copy - angList = np.asarray(angList).ravel() # needs to have len + angList = np.asarray(angList).ravel() # needs to have len startAngs = np.asarray(startAngs).ravel() # needs to have len - stopAngs = np.asarray(stopAngs).ravel() # needs to have len + stopAngs = np.asarray(stopAngs).ravel() # needs to have len n_ranges = len(startAngs) - assert len(stopAngs) == n_ranges, \ "length of min and max angular limits must match!" + assert ( + len(stopAngs) == n_ranges + ), "length of min and max angular limits must match!" # to avoid warnings in >=, <= later down, mark nans; # need these to trick output to False in the case of nan input @@ -1179,7 +1167,7 @@ def validateAngleRanges(angList, startAngs, stopAngs, ccw=True): reflInRange = np.zeros(angList.shape, dtype=bool) # bin length for chunking - binLen = np.pi / 2. + binLen = np.pi / 2.0 # in plane vectors defining wedges x0 = np.vstack([np.cos(startAngs), np.sin(startAngs)]) @@ -1187,37 +1175,35 @@ def validateAngleRanges(angList, startAngs, stopAngs, ccw=True): # dot products dp = np.sum(x0 * x1, axis=0) - if np.any(dp >= 1. - sqrt_epsf) and n_ranges > 1: + if np.any(dp >= 1.0 - sqrt_epsf) and n_ranges > 1: # ambiguous case raise RuntimeError( "At least one of your ranges is already 360 degrees!" ) - elif dp[0] >= 1. - sqrt_epsf and n_ranges == 1: + elif dp[0] >= 1.0 - sqrt_epsf and n_ranges == 1: # trivial case!
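# (a lone wedge whose start/stop unit vectors have dot product ~1 spans the full circle, so every finite angle is in range and only NaN inputs are masked out)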
reflInRange = np.ones(angList.shape, dtype=bool) reflInRange[nan_mask] = False else: # solve for arc lengths # ...note: no zeros should have made it here - a = x0[0, :]*x1[1, :] - x0[1, :]*x1[0, :] - b = x0[0, :]*x1[0, :] + x0[1, :]*x1[1, :] + a = x0[0, :] * x1[1, :] - x0[1, :] * x1[0, :] + b = x0[0, :] * x1[0, :] + x0[1, :] * x1[1, :] phi = np.arctan2(b, a) - arclen = 0.5*np.pi - phi # these are clockwise + arclen = 0.5 * np.pi - phi # these are clockwise cw_phis = arclen < 0 - arclen[cw_phis] = 2*np.pi + arclen[cw_phis] # all positive (CW) now + arclen[cw_phis] = 2 * np.pi + arclen[cw_phis] # all positive (CW) now if not ccw: - arclen = 2*np.pi - arclen + arclen = 2 * np.pi - arclen - if sum(arclen) > 2*np.pi: - raise RuntimeWarning( - "Specified angle ranges sum to > 360 degrees" - ) + if sum(arclen) > 2 * np.pi: + raise RuntimeWarning("Specified angle ranges sum to > 360 degrees") # chunk each angular range into subranges no longer than binLen dp = np.zeros(n_ranges) for i in range(n_ranges): # number of subranges using 'binLen' - numSubranges = int(np.ceil(arclen[i]/binLen)) + numSubranges = int(np.ceil(arclen[i] / binLen)) # check remainder binrem = np.remainder(arclen[i], binLen) @@ -1234,23 +1220,25 @@ def validateAngleRanges(angList, startAngs, stopAngs, ccw=True): # Create sub ranges on the fly to avoid ambiguity in dot product # for wedges >= 180 degrees subRanges = np.array( - [startAngs[i] + binLen*j for j in range(numSubranges)] - + [startAngs[i] + binLen*(numSubranges - 1) + finalBinLen] + [startAngs[i] + binLen * j for j in range(numSubranges)] + + [startAngs[i] + binLen * (numSubranges - 1) + finalBinLen] ) for k in range(numSubranges): zStart = _z_project(angList, subRanges[k]) zStop = _z_project(angList, subRanges[k + 1]) if ccw: - zStart[nan_mask] = 999. - zStop[nan_mask] = -999. - reflInRange = reflInRange | \ np.logical_and(zStart <= 0, zStop >= 0) + zStart[nan_mask] = 999.0 + zStop[nan_mask] = -999.0 + reflInRange = reflInRange | np.logical_and( + zStart <= 0, zStop >= 0 + ) else: - zStart[nan_mask] = -999. - zStop[nan_mask] = 999.
- reflInRange = reflInRange | \ - np.logical_and(zStart >= 0, zStop <= 0) + zStart[nan_mask] = -999.0 + zStop[nan_mask] = 999.0 + reflInRange = reflInRange | np.logical_and( + zStart >= 0, zStop <= 0 + ) return reflInRange @@ -1277,33 +1265,50 @@ def rotate_vecs_about_axis(angle, axis, vecs): angle = np.atleast_1d(angle) # quaternion components - q0 = np.cos(0.5*angle) - q1 = np.sin(0.5*angle) + q0 = np.cos(0.5 * angle) + q1 = np.sin(0.5 * angle) qv = np.tile(q1, (3, 1)) * axis # component perpendicular to axes (inherits shape of vecs) - vp0 = vecs[0, :] - axis[0, :]*axis[0, :]*vecs[0, :] \ - - axis[0, :]*axis[1, :]*vecs[1, :] - axis[0, :]*axis[2, :]*vecs[2, :] - vp1 = vecs[1, :] - axis[1, :]*axis[1, :]*vecs[1, :] \ - - axis[1, :]*axis[0, :]*vecs[0, :] - axis[1, :]*axis[2, :]*vecs[2, :] - vp2 = vecs[2, :] - axis[2, :]*axis[2, :]*vecs[2, :] \ - - axis[2, :]*axis[0, :]*vecs[0, :] - axis[2, :]*axis[1, :]*vecs[1, :] + vp0 = ( + vecs[0, :] + - axis[0, :] * axis[0, :] * vecs[0, :] + - axis[0, :] * axis[1, :] * vecs[1, :] + - axis[0, :] * axis[2, :] * vecs[2, :] + ) + vp1 = ( + vecs[1, :] + - axis[1, :] * axis[1, :] * vecs[1, :] + - axis[1, :] * axis[0, :] * vecs[0, :] + - axis[1, :] * axis[2, :] * vecs[2, :] + ) + vp2 = ( + vecs[2, :] + - axis[2, :] * axis[2, :] * vecs[2, :] + - axis[2, :] * axis[0, :] * vecs[0, :] + - axis[2, :] * axis[1, :] * vecs[1, :] + ) # dot product with components along; cross product with components normal - qdota = (axis[0, :] * vecs[0, :] + - axis[1, :] * vecs[1, :] + - axis[2, :] * vecs[2, :])\ - * (axis[0, :] * qv[0, :] + - axis[1, :] * qv[1, :] + - axis[2, :] * qv[2, :]) - qcrossn = np.vstack([qv[1, :]*vp2 - qv[2, :]*vp1, - qv[2, :]*vp0 - qv[0, :]*vp2, - qv[0, :]*vp1 - qv[1, :]*vp0]) + qdota = ( + axis[0, :] * vecs[0, :] + + axis[1, :] * vecs[1, :] + + axis[2, :] * vecs[2, :] + ) * (axis[0, :] * qv[0, :] + axis[1, :] * qv[1, :] + axis[2, :] * qv[2, :]) + qcrossn = np.vstack( + [ + qv[1, :] * vp2 - qv[2, :] * vp1, + qv[2, :] * vp0 - qv[0, :] * vp2, + qv[0, :] * vp1 - qv[1, :] * vp0, + ] + ) # quaternion formula - v_rot = np.tile(q0*q0 - q1*q1, (3, 1)) * vecs \ - + 2. * np.tile(qdota, (3, 1)) * qv \ - + 2. 
* np.tile(q0, (3, 1)) * qcrossn + v_rot = ( + np.tile(q0 * q0 - q1 * q1, (3, 1)) * vecs + + 2.0 * np.tile(qdota, (3, 1)) * qv + + 2.0 * np.tile(q0, (3, 1)) * qcrossn + ) return v_rot @@ -1336,24 +1341,27 @@ def quat_product_matrix(q, mult='right'): """ if mult == 'right': qmat = np.array( - [[q[0], -q[1], -q[2], -q[3]], - [q[1], q[0], q[3], -q[2]], - [q[2], -q[3], q[0], q[1]], - [q[3], q[2], -q[1], q[0]]] + [ + [q[0], -q[1], -q[2], -q[3]], + [q[1], q[0], q[3], -q[2]], + [q[2], -q[3], q[0], q[1]], + [q[3], q[2], -q[1], q[0]], + ] ) elif mult == 'left': qmat = np.array( - [[q[0], -q[1], -q[2], -q[3]], - [q[1], q[0], -q[3], q[2]], - [q[2], q[3], q[0], -q[1]], - [q[3], -q[2], q[1], q[0]]] + [ + [q[0], -q[1], -q[2], -q[3]], + [q[1], q[0], -q[3], q[2]], + [q[2], q[3], q[0], -q[1]], + [q[3], -q[2], q[1], q[0]], + ] ) return qmat def quat_distance(q1, q2, qsym): - """ - """ + """ """ # qsym from PlaneData objects are (4, nsym) # convert symmetries to (4, 4) qprod matrices nsym = qsym.shape[1] @@ -1363,8 +1371,7 @@ def quat_distance(q1, q2, qsym): # inverse of q1 in matrix form q1i = quat_product_matrix( - np.r_[1, -1, -1, -1] * np.atleast_1d(q1).flatten(), - mult='right' + np.r_[1, -1, -1, -1] * np.atleast_1d(q1).flatten(), mult='right' ) # Do R * Gc, store as vstacked equivalent quaternions (nsym, 4) diff --git a/hexrd/utils/decorators.py b/hexrd/utils/decorators.py index f6716d4fd..056c738e4 100644 --- a/hexrd/utils/decorators.py +++ b/hexrd/utils/decorators.py @@ -13,8 +13,6 @@ import numpy as np import xxhash -from hexrd.constants import USE_NUMBA - def undoc(func): """Mark a function or class as undocumented. @@ -117,31 +115,6 @@ def convert(x): return tuple(map(convert, items)) -def numba_njit_if_available(func=None, *args, **kwargs): - # Forwards decorator to numba.njit if numba is available - # Otherwise, does nothing. - - def decorator(func): - if USE_NUMBA: - import numba - return numba.njit(*args, **kwargs)(func) - else: - # Do nothing... 
- return func - - if func is None: - return decorator - else: - return decorator(func) - - -# Also expose prange depending on whether we have numba or not -if USE_NUMBA: - from numba import prange -else: - prange = range - - # A decorator to limit the number of numba threads def limit_numba_threads(max_threads): def decorator(func): diff --git a/hexrd/valunits.py b/hexrd/valunits.py index 112310bd6..3dd0b9462 100644 --- a/hexrd/valunits.py +++ b/hexrd/valunits.py @@ -70,8 +70,6 @@ class UNames(object): keV = 'keV' J = 'J' - pass - cv_dict = { (UNames.degrees, UNames.radians): math.pi/180.0, @@ -130,7 +128,6 @@ def __init__(self, name, unitType, value, unit): # # Original checked if unit is of unitType # - pass # end init def __str__(self): tmpl = """item named "%s" representing %g %s""" @@ -336,4 +333,3 @@ def testConversions(): print((' in ', u, ': ', v.getVal(u))) return testConversions() - pass # end of tests diff --git a/hexrd/wppf/derivatives.py b/hexrd/wppf/derivatives.py index d0cc7c682..d1e4cce33 100644 --- a/hexrd/wppf/derivatives.py +++ b/hexrd/wppf/derivatives.py @@ -1,6 +1,7 @@ import numpy as np -from hexrd.utils.decorators import numba_njit_if_available +from numba import njit from hexrd.wppf.peakfunctions import _unit_gaussian, _unit_lorentzian + """ naming convention for the derivative is as follows: _d__ @@ -18,102 +19,127 @@ """ -@numba_njit_if_available(cache=True, nogil=True) + +@njit(cache=True, nogil=True) def _d_pvfcj_fwhm(): pass -@numba_njit_if_available(cache=True, nogil=True) + +@njit(cache=True, nogil=True) def _d_pvfcj_tth(): pass -@numba_njit_if_available(cache=True, nogil=True) + +@njit(cache=True, nogil=True) def _d_fwhm_U(): pass -@numba_njit_if_available(cache=True, nogil=True) + +@njit(cache=True, nogil=True) def _d_fwhm_V(): pass -@numba_njit_if_available(cache=True, nogil=True) + +@njit(cache=True, nogil=True) def _d_fwhm_W(): pass -@numba_njit_if_available(cache=True, nogil=True) + +@njit(cache=True, nogil=True) def _d_fwhm_P(): pass -@numba_njit_if_available(cache=True, nogil=True) + +@njit(cache=True, nogil=True) def _d_fwhm_X(): pass -@numba_njit_if_available(cache=True, nogil=True) + +@njit(cache=True, nogil=True) def _d_fwhm_Y(): pass -@numba_njit_if_available(cache=True, nogil=True) + +@njit(cache=True, nogil=True) def _d_fwhm_Xe(): pass -@numba_njit_if_available(cache=True, nogil=True) + +@njit(cache=True, nogil=True) def _d_fwhm_Ye(): pass -@numba_njit_if_available(cache=True, nogil=True) + +@njit(cache=True, nogil=True) def _d_fwhm_Xs(): pass -@numba_njit_if_available(cache=True, nogil=True) + +@njit(cache=True, nogil=True) def _d_fwhm_HL(): pass -@numba_njit_if_available(cache=True, nogil=True) + +@njit(cache=True, nogil=True) def _d_fwhm_SL(): pass -@numba_njit_if_available(cache=True, nogil=True) + +@njit(cache=True, nogil=True) def _d_pvfcj_scale(): pass -@numba_njit_if_available(cache=True, nogil=True) + +@njit(cache=True, nogil=True) def _d_pvfcj_phase_fraction(): pass -@numba_njit_if_available(cache=True, nogil=True) + +@njit(cache=True, nogil=True) def _d_pvfcj_trns(): pass -@numba_njit_if_available(cache=True, nogil=True) + +@njit(cache=True, nogil=True) def _d_pvfcj_shft(): pass -@numba_njit_if_available(cache=True, nogil=True) + +@njit(cache=True, nogil=True) def _d_pvfcj_zero_error(): pass -@numba_njit_if_available(cache=True, nogil=True) + +@njit(cache=True, nogil=True) def _d_pvfcj_shkls(): pass -@numba_njit_if_available(cache=True, nogil=True) + +@njit(cache=True, nogil=True) def _d_pvfcj_a(): pass 
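Every stub in this file gets the same mechanical substitution: with numba now an unconditional dependency, the numba_njit_if_available forwarder deleted from hexrd/utils/decorators.py above collapses to a direct numba.njit decoration, and prange is imported from numba instead of falling back to range. A minimal sketch of the pattern on a toy function (not hexrd code; the function and its arguments are hypothetical):

from numba import njit, prange
import numpy as np

# before: @numba_njit_if_available(cache=True, nogil=True, parallel=True)
# after:  decorate with numba.njit directly
@njit(cache=True, nogil=True, parallel=True)
def row_norms(a):
    out = np.empty(a.shape[0])
    for i in prange(a.shape[0]):  # numba's prange, never the plain-range fallback
        s = 0.0
        for j in range(a.shape[1]):
            s += a[i, j] * a[i, j]
        out[i] = np.sqrt(s)
    return out

row_norms(np.ones((4, 3)))  # JIT-compiles on first call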
-@numba_njit_if_available(cache=True, nogil=True) + +@njit(cache=True, nogil=True) def _d_pvfcj_b(): pass -@numba_njit_if_available(cache=True, nogil=True) + +@njit(cache=True, nogil=True) def _d_pvfcj_c(): pass -@numba_njit_if_available(cache=True, nogil=True) + +@njit(cache=True, nogil=True) def _d_pvfcj_alpha(): pass -@numba_njit_if_available(cache=True, nogil=True) + +@njit(cache=True, nogil=True) def _d_pvfcj_beta(): pass -@numba_njit_if_available(cache=True, nogil=True) + +@njit(cache=True, nogil=True) def _d_pvfcj_gamma(): pass diff --git a/hexrd/wppf/peakfunctions.py b/hexrd/wppf/peakfunctions.py index af5abaf27..0eecdce7d 100644 --- a/hexrd/wppf/peakfunctions.py +++ b/hexrd/wppf/peakfunctions.py @@ -28,22 +28,17 @@ import numpy as np import copy from hexrd import constants -from hexrd.utils.decorators import numba_njit_if_available -from numba import vectorize, float64 +from numba import vectorize, float64, njit, prange from hexrd.fitting.peakfunctions import erfc, exp1exp -# from scipy.special import erfc, exp1 -if constants.USE_NUMBA: - from numba import prange -else: - prange = range +# from scipy.special import erfc, exp1 # addr = get_cython_function_address("scipy.special.cython_special", "exp1") # functype = ctypes.CFUNCTYPE(ctypes.c_double, ctypes.c_double) # exp1_fn = functype(addr) gauss_width_fact = constants.sigma_to_fwhm -lorentz_width_fact = 2. +lorentz_width_fact = 2.0 # FIXME: we need this for the time being to be able to parse multipeak fitting # results; need to wrap all this up in a class in the future! @@ -51,19 +46,16 @@ 'gaussian': 3, 'lorentzian': 3, 'pvoigt': 4, - 'split_pvoigt': 6 + 'split_pvoigt': 6, } """ Calgliotti and Lorentzian FWHM functions """ -@numba_njit_if_available(cache=True, nogil=True) -def _gaussian_fwhm(uvw, - P, - gamma_ani_sqr, - eta_mixing, - tth, - dsp): + + +@njit(cache=True, nogil=True) +def _gaussian_fwhm(uvw, P, gamma_ani_sqr, eta_mixing, tth, dsp): """ @AUTHOR: Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov @DATE: 05/20/2020 SS 1.0 original @@ -76,24 +68,19 @@ def _gaussian_fwhm(uvw, dsp d-spacing """ U, V, W = uvw - th = np.radians(0.5*tth) + th = np.radians(0.5 * tth) tanth = np.tan(th) - cth2 = np.cos(th)**2.0 - sig2_ani = gamma_ani_sqr*(1.-eta_mixing)**2*dsp**4 - sigsqr = (U+sig2_ani) * tanth**2 + V * tanth + W + P/cth2 - if(sigsqr <= 0.): + cth2 = np.cos(th) ** 2.0 + sig2_ani = gamma_ani_sqr * (1.0 - eta_mixing) ** 2 * dsp**4 + sigsqr = (U + sig2_ani) * tanth**2 + V * tanth + W + P / cth2 + if sigsqr <= 0.0: sigsqr = 1.0e-12 - return np.sqrt(sigsqr)*1e-2 + return np.sqrt(sigsqr) * 1e-2 -@numba_njit_if_available(cache=True, nogil=True) -def _lorentzian_fwhm(xy, - xy_sf, - gamma_ani_sqr, - eta_mixing, - tth, - dsp): +@njit(cache=True, nogil=True) +def _lorentzian_fwhm(xy, xy_sf, gamma_ani_sqr, eta_mixing, tth, dsp): """ @AUTHOR: Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov @DATE: 07/20/2020 SS 1.0 original @@ -108,14 +95,15 @@ def _lorentzian_fwhm(xy, else regular broadening """ X, Y = xy - th = np.radians(0.5*tth) + th = np.radians(0.5 * tth) tanth = np.tan(th) cth = np.cos(th) - sig_ani = np.sqrt(gamma_ani_sqr)*eta_mixing*dsp**2 - gamma = (X+xy_sf)/cth + (Y+sig_ani)*tanth - return gamma*1e-2 + sig_ani = np.sqrt(gamma_ani_sqr) * eta_mixing * dsp**2 + gamma = (X + xy_sf) / cth + (Y + sig_ani) * tanth + return gamma * 1e-2 + -@numba_njit_if_available(cache=True, nogil=True) +@njit(cache=True, nogil=True) def _anisotropic_peak_broadening(shkl, hkl): """ this function generates the broadening 
as @@ -131,36 +119,51 @@ def _anisotropic_peak_broadening(shkl, hkl): "s310", "s103", "s031", "s130", "s301", "s013", "s211", "s121", "s112"] """ - h,k,l = hkl - gamma_sqr = (shkl[0]*h**4 + - shkl[1]*k**4 + - shkl[2]*l**4 + - 3.0*(shkl[3]*(h*k)**2 + - shkl[4]*(h*l)**2 + - shkl[5]*(k*l)**2)+ - 2.0*(shkl[6]*k*h**3 + - shkl[7]*h*l**3 + - shkl[8]*l*k**3 + - shkl[9]*h*k**3 + - shkl[10]*l*h**3 + - shkl[11]*k*l**3) + - 4.0*(shkl[12]*k*l*h**2 + - shkl[13]*h*l*k**2 + - shkl[14]*h*k*l**2)) + # l_val is just l, renamed because the bare name l is easily mistaken for 1 or I + h, k, l_val = hkl + gamma_sqr = ( + shkl[0] * h**4 + + shkl[1] * k**4 + + shkl[2] * l_val**4 + + 3.0 + * ( + shkl[3] * (h * k) ** 2 + + shkl[4] * (h * l_val) ** 2 + + shkl[5] * (k * l_val) ** 2 + ) + + 2.0 + * ( + shkl[6] * k * h**3 + + shkl[7] * h * l_val**3 + + shkl[8] * l_val * k**3 + + shkl[9] * h * k**3 + + shkl[10] * l_val * h**3 + + shkl[11] * k * l_val**3 + ) + + 4.0 + * ( + shkl[12] * k * l_val * h**2 + + shkl[13] * h * l_val * k**2 + + shkl[14] * h * k * l_val**2 + ) + ) return gamma_sqr + def _anisotropic_gaussian_component(gamma_sqr, eta_mixing): """ gaussian component in anisotropic broadening """ - return gamma_sqr*(1. - eta_mixing)**2 + return gamma_sqr * (1.0 - eta_mixing) ** 2 + def _anisotropic_lorentzian_component(gamma_sqr, eta_mixing): """ lorentzian component in anisotropic broadening """ - return np.sqrt(gamma_sqr)*eta_mixing + return np.sqrt(gamma_sqr) * eta_mixing + # ============================================================================= # 1-D Gaussian Functions @@ -168,7 +171,7 @@ def _anisotropic_lorentzian_component(gamma_sqr, eta_mixing): # Split the unit gaussian so this can be called for 2d and 3d functions -@numba_njit_if_available(cache=True, nogil=True) +@njit(cache=True, nogil=True) def _unit_gaussian(p, x): """ Required Arguments: @@ -181,16 +184,17 @@ def _unit_gaussian(p, x): x0 = p[0] FWHM = p[1] - sigma = FWHM/gauss_width_fact + sigma = FWHM / gauss_width_fact - f = np.exp(-(x-x0)**2/(2.*sigma**2.)) + f = np.exp(-((x - x0) ** 2) / (2.0 * sigma**2.0)) return f + # ============================================================================= # 1-D Lorentzian Functions # ============================================================================= # Split the unit function so this can be called for 2d and 3d functions -@numba_njit_if_available(cache=True, nogil=True) +@njit(cache=True, nogil=True) def _unit_lorentzian(p, x): """ Required Arguments: @@ -203,12 +207,13 @@ def _unit_lorentzian(p, x): x0 = p[0] FWHM = p[1] - gamma = FWHM/lorentz_width_fact + gamma = FWHM / lorentz_width_fact - f = gamma / ((x-x0)**2 + gamma**2) + f = gamma / ((x - x0) ** 2 + gamma**2) return f -@numba_njit_if_available(cache=True, nogil=True) + +@njit(cache=True, nogil=True) def _mixing_factor_pv(fwhm_g, fwhm_l): """ @AUTHOR: Saransh Singh, Lawrence Livermore National Lab, @@ -220,99 +225,95 @@ def _mixing_factor_pv(fwhm_g, fwhm_l): @DETAILS: calculates the mixing factor eta to best approximate Voigt peak shapes """ - fwhm = fwhm_g**5 + 2.69269 * fwhm_g**4 * fwhm_l + \ - 2.42843 * fwhm_g**3 * fwhm_l**2 + \ - 4.47163 * fwhm_g**2 * fwhm_l**3 +\ - 0.07842 * fwhm_g * fwhm_l**4 +\ - fwhm_l**5 + fwhm = ( + fwhm_g**5 + + 2.69269 * fwhm_g**4 * fwhm_l + + 2.42843 * fwhm_g**3 * fwhm_l**2 + + 4.47163 * fwhm_g**2 * fwhm_l**3 + + 0.07842 * fwhm_g * fwhm_l**4 + + fwhm_l**5 + ) fwhm = fwhm**0.20 - eta = 1.36603 * (fwhm_l/fwhm) - \ - 0.47719 * (fwhm_l/fwhm)**2 + \ - 0.11116 * (fwhm_l/fwhm)**3 - if eta < 0.: - eta = 0.
- elif eta > 1.: - eta = 1. + eta = ( + 1.36603 * (fwhm_l / fwhm) + - 0.47719 * (fwhm_l / fwhm) ** 2 + + 0.11116 * (fwhm_l / fwhm) ** 3 + ) + if eta < 0.0: + eta = 0.0 + elif eta > 1.0: + eta = 1.0 return eta, fwhm -@numba_njit_if_available(cache=True, nogil=True) -def pvoight_wppf(uvw, - p, - xy, - xy_sf, - shkl, - eta_mixing, - tth, - dsp, - hkl, - tth_list): + +@njit(cache=True, nogil=True) +def pvoight_wppf(uvw, p, xy, xy_sf, shkl, eta_mixing, tth, dsp, hkl, tth_list): """ @author Saransh Singh, Lawrence Livermore National Lab @date 03/22/2021 SS 1.0 original @details pseudo voight peak profile for WPPF """ - gamma_ani_sqr = _anisotropic_peak_broadening( - shkl, hkl) - fwhm_g = _gaussian_fwhm(uvw, p, - gamma_ani_sqr, - eta_mixing, - tth, dsp) - fwhm_l = _lorentzian_fwhm(xy, xy_sf, - gamma_ani_sqr, eta_mixing, - tth, dsp) + gamma_ani_sqr = _anisotropic_peak_broadening(shkl, hkl) + fwhm_g = _gaussian_fwhm(uvw, p, gamma_ani_sqr, eta_mixing, tth, dsp) + fwhm_l = _lorentzian_fwhm(xy, xy_sf, gamma_ani_sqr, eta_mixing, tth, dsp) n, fwhm = _mixing_factor_pv(fwhm_g, fwhm_l) - Ag = 0.9394372787/fwhm # normalization factor for unit area - Al = 1.0/np.pi # normalization factor for unit area + Ag = 0.9394372787 / fwhm # normalization factor for unit area + Al = 1.0 / np.pi # normalization factor for unit area + + g = Ag * _unit_gaussian(np.array([tth, fwhm]), tth_list) + l_val = Al * _unit_lorentzian(np.array([tth, fwhm]), tth_list) - g = Ag*_unit_gaussian(np.array([tth, fwhm]), tth_list) - l = Al*_unit_lorentzian(np.array([tth, fwhm]), tth_list) - - return n*l + (1.0-n)*g + return n * l_val + (1.0 - n) * g -@numba_njit_if_available(cache=True, nogil=True) + +@njit(cache=True, nogil=True) def _func_h(tau, tth_r): - cph = np.cos(tth_r - tau) + cph = np.cos(tth_r - tau) ctth = np.cos(tth_r) - return np.sqrt( (cph/ctth)**2 - 1.) + return np.sqrt((cph / ctth) ** 2 - 1.0) + -@numba_njit_if_available(cache=True, nogil=True) +@njit(cache=True, nogil=True) def _func_W(HoL, SoL, tau, tau_min, tau_infl, tth): - if(tth < np.pi/2.): - if tau >= 0. and tau <= tau_infl: - res = 2.0*min(HoL,SoL) + if tth < np.pi / 2.0: + if tau >= 0.0 and tau <= tau_infl: + res = 2.0 * min(HoL, SoL) elif tau > tau_infl and tau <= tau_min: - res = HoL+SoL+_func_h(tau,tth) + res = HoL + SoL + _func_h(tau, tth) else: res = 0.0 else: - if tau <= 0. 
and tau >= tau_infl: - res = 2.0*min(HoL,SoL) + if tau <= 0.0 and tau >= tau_infl: + res = 2.0 * min(HoL, SoL) elif tau < tau_infl and tau >= tau_min: - res = HoL+SoL+_func_h(tau,tth) + res = HoL + SoL + _func_h(tau, tth) else: res = 0.0 return res -@numba_njit_if_available(cache=True, nogil=True) -def pvfcj(uvw, - p, - xy, - xy_sf, - shkl, - eta_mixing, - tth, - dsp, - hkl, - tth_list, - HoL, - SoL, - xn, - wn): + +@njit(cache=True, nogil=True) +def pvfcj( + uvw, + p, + xy, + xy_sf, + shkl, + eta_mixing, + tth, + dsp, + hkl, + tth_list, + HoL, + SoL, + xn, + wn, +): """ @author Saransh Singh, Lawrence Livermore National Lab @date 04/02/2021 SS 1.0 original @@ -325,63 +326,63 @@ def pvfcj(uvw, tth_r = np.radians(tth) ctth = np.cos(tth_r) - arg = ctth*np.sqrt(((HoL+SoL)**2+1.)) + arg = ctth * np.sqrt(((HoL + SoL) ** 2 + 1.0)) cinv = np.arccos(arg) tau_min = tth_r - cinv # two theta of inflection point - arg = ctth*np.sqrt(((HoL-SoL)**2+1.)) + arg = ctth * np.sqrt(((HoL - SoL) ** 2 + 1.0)) cinv = np.arccos(arg) tau_infl = tth_r - cinv - tau = tau_min*xn + tau = tau_min * xn cx = np.cos(tau) res = np.zeros(tth_list.shape) den = 0.0 for i in np.arange(tau.shape[0]): - x = tth_r-tau[i] + x = tth_r - tau[i] xx = tau[i] - W = _func_W(HoL,SoL,xx,tau_min,tau_infl,tth_r) + W = _func_W(HoL, SoL, xx, tau_min, tau_infl, tth_r) h = _func_h(xx, tth_r) - fact = wn[i]*(W/h/cx[i]) + fact = wn[i] * (W / h / cx[i]) den += fact - pv = pvoight_wppf(uvw, - p, - xy, - xy_sf, - shkl, - eta_mixing, - np.degrees(x), - dsp, - hkl, - tth_list) - res += pv*fact - - res = np.sin(tth_r)*res/den/4./HoL/SoL + pv = pvoight_wppf( + uvw, + p, + xy, + xy_sf, + shkl, + eta_mixing, + np.degrees(x), + dsp, + hkl, + tth_list, + ) + res += pv * fact + + res = np.sin(tth_r) * res / den / 4.0 / HoL / SoL a = np.trapz(res, tth_list) - return res/a + return res / a -@numba_njit_if_available(cache=True, nogil=True) + +@njit(cache=True, nogil=True) def _calc_alpha(alpha, tth): a0, a1 = alpha - return (a0 + a1*np.tan(np.radians(0.5*tth))) + return a0 + a1 * np.tan(np.radians(0.5 * tth)) -@numba_njit_if_available(cache=True, nogil=True) +@njit(cache=True, nogil=True) def _calc_beta(beta, tth): b0, b1 = beta - return b0 + b1*np.tan(np.radians(0.5*tth)) + return b0 + b1 * np.tan(np.radians(0.5 * tth)) + -@numba_njit_if_available(cache=True, nogil=True) -def _gaussian_pink_beam(alpha, - beta, - fwhm_g, - tth, - tth_list): +@njit(cache=True, nogil=True) +def _gaussian_pink_beam(alpha, beta, fwhm_g, tth, tth_list): """ @author Saransh Singh, Lawrence Livermore National Lab @date 03/22/2021 SS 1.0 original @@ -392,32 +393,28 @@ def _gaussian_pink_beam(alpha, """ del_tth = tth_list - tth sigsqr = fwhm_g**2 - f1 = alpha*sigsqr + 2.0*del_tth - f2 = beta*sigsqr - 2.0*del_tth - f3 = np.sqrt(2.0)*fwhm_g - u = 0.5*alpha*f1 - v = 0.5*beta*f2 - y = (f1-del_tth)/f3 - z = (f2+del_tth)/f3 + f1 = alpha * sigsqr + 2.0 * del_tth + f2 = beta * sigsqr - 2.0 * del_tth + f3 = np.sqrt(2.0) * fwhm_g + u = 0.5 * alpha * f1 + v = 0.5 * beta * f2 + y = (f1 - del_tth) / f3 + z = (f2 + del_tth) / f3 t1 = erfc(y) t2 = erfc(z) g = np.zeros(tth_list.shape) - zmask = np.abs(del_tth) > 5.0 - g[~zmask] = (0.5*(alpha*beta)/(alpha + beta)) \ - * np.exp(u[~zmask])*t1[~zmask] + \ - np.exp(v[~zmask])*t2[~zmask] + zmask = np.abs(del_tth) > 5.0 + g[~zmask] = (0.5 * (alpha * beta) / (alpha + beta)) * np.exp( + u[~zmask] + ) * t1[~zmask] + np.exp(v[~zmask]) * t2[~zmask] mask = np.isnan(g) - g[mask] = 0. 
+ g[mask] = 0.0 return g -@numba_njit_if_available(cache=True, nogil=True) -def _lorentzian_pink_beam(alpha, - beta, - fwhm_l, - tth, - tth_list): +@njit(cache=True, nogil=True) +def _lorentzian_pink_beam(alpha, beta, fwhm_l, tth, tth_list): """ @author Saransh Singh, Lawrence Livermore National Lab @date 03/22/2021 SS 1.0 original @@ -427,8 +424,8 @@ def _lorentzian_pink_beam(alpha, Von Dreele et. al., J. Appl. Cryst. (2021). 54, 3–6 """ del_tth = tth_list - tth - p = -alpha*del_tth + 1j * 0.5*alpha*fwhm_l - q = -beta*del_tth + 1j * 0.5*beta*fwhm_l + p = -alpha * del_tth + 1j * 0.5 * alpha * fwhm_l + q = -beta * del_tth + 1j * 0.5 * beta * fwhm_l y = np.zeros(tth_list.shape) @@ -437,26 +434,18 @@ def _lorentzian_pink_beam(alpha, # f1 = exp1(p) # f2 = exp1(q) - y = -(alpha*beta)/(np.pi*(alpha+beta))*(f1+f2).imag - + y = -(alpha * beta) / (np.pi * (alpha + beta)) * (f1 + f2).imag + mask = np.isnan(y) - y[mask] = 0. + y[mask] = 0.0 return y -@numba_njit_if_available(cache=True, nogil=True) -def pvoight_pink_beam(alpha, - beta, - uvw, - p, - xy, - xy_sf, - shkl, - eta_mixing, - tth, - dsp, - hkl, - tth_list): + +@njit(cache=True, nogil=True) +def pvoight_pink_beam( + alpha, beta, uvw, p, xy, xy_sf, shkl, eta_mixing, tth, dsp, hkl, tth_list +): """ @author Saransh Singh, Lawrence Livermore National Lab @date 03/22/2021 SS 1.0 original @@ -466,48 +455,43 @@ def pvoight_pink_beam(alpha, alpha_exp = _calc_alpha(alpha, tth) beta_exp = _calc_beta(beta, tth) - gamma_ani_sqr = _anisotropic_peak_broadening( - shkl, hkl) + gamma_ani_sqr = _anisotropic_peak_broadening(shkl, hkl) - fwhm_g = _gaussian_fwhm(uvw, p, - gamma_ani_sqr, - eta_mixing, - tth, dsp) - fwhm_l = _lorentzian_fwhm(xy, xy_sf, - gamma_ani_sqr, eta_mixing, - tth, dsp) + fwhm_g = _gaussian_fwhm(uvw, p, gamma_ani_sqr, eta_mixing, tth, dsp) + fwhm_l = _lorentzian_fwhm(xy, xy_sf, gamma_ani_sqr, eta_mixing, tth, dsp) n, fwhm = _mixing_factor_pv(fwhm_g, fwhm_l) - g = _gaussian_pink_beam(alpha_exp, beta_exp, - fwhm_g, tth, tth_list) - l = _lorentzian_pink_beam(alpha_exp, beta_exp, - fwhm_l, tth, tth_list) - ag = np.trapz(g,tth_list) - al = np.trapz(l,tth_list) + g = _gaussian_pink_beam(alpha_exp, beta_exp, fwhm_g, tth, tth_list) + l_val = _lorentzian_pink_beam(alpha_exp, beta_exp, fwhm_l, tth, tth_list) + ag = np.trapz(g, tth_list) + al = np.trapz(l_val, tth_list) if np.abs(ag) < 1e-6: ag = 1.0 if np.abs(al) < 1e-6: al = 1.0 - return n*l/al + (1.0-n)*g/ag - -@numba_njit_if_available(cache=True, nogil=True, parallel=True) -def computespectrum_pvfcj(uvw, - p, - xy, - xy_sf, - shkl, - eta_mixing, - HL, - SL, - tth, - dsp, - hkl, - tth_list, - Iobs, - xn, - wn): + return n * l_val / al + (1.0 - n) * g / ag + + +@njit(cache=True, nogil=True, parallel=True) +def computespectrum_pvfcj( + uvw, + p, + xy, + xy_sf, + shkl, + eta_mixing, + HL, + SL, + tth, + dsp, + hkl, + tth_list, + Iobs, + xn, + wn, +): """ @author Saransh Singh, Lawrence Livermore National Lab @date 03/31/2021 SS 1.0 original @@ -518,9 +502,9 @@ def computespectrum_pvfcj(uvw, """ spec = np.zeros(tth_list.shape) - nref = np.min(np.array([Iobs.shape[0], - tth.shape[0], - dsp.shape[0],hkl.shape[0]])) + nref = np.min( + np.array([Iobs.shape[0], tth.shape[0], dsp.shape[0], hkl.shape[0]]) + ) for ii in prange(nref): II = Iobs[ii] @@ -529,27 +513,18 @@ def computespectrum_pvfcj(uvw, g = hkl[ii] xs = xy_sf[ii] - pv = pvfcj(uvw,p,xy,xs, - shkl,eta_mixing, - t,d,g, - tth_list, - HL,SL,xn,wn) + pv = pvfcj( + uvw, p, xy, xs, shkl, eta_mixing, t, d, g, tth_list, HL, SL, xn, wn + ) spec += II * pv 
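# (pvfcj returns an area-normalized profile -- it ends with res / np.trapz(res, tth_list) -- so II = Iobs[ii] directly sets each reflection's integrated intensity; with parallel=True this prange loop accumulates reflections concurrently)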
return spec -@numba_njit_if_available(cache=True, nogil=True, parallel=True) -def computespectrum_pvtch(uvw, - p, - xy, - xy_sf, - shkl, - eta_mixing, - tth, - dsp, - hkl, - tth_list, - Iobs): + +@njit(cache=True, nogil=True, parallel=True) +def computespectrum_pvtch( + uvw, p, xy, xy_sf, shkl, eta_mixing, tth, dsp, hkl, tth_list, Iobs +): """ @author Saransh Singh, Lawrence Livermore National Lab @date 03/31/2021 SS 1.0 original @@ -560,9 +535,9 @@ def computespectrum_pvtch(uvw, """ spec = np.zeros(tth_list.shape) - nref = np.min(np.array([Iobs.shape[0], - tth.shape[0], - dsp.shape[0],hkl.shape[0]])) + nref = np.min( + np.array([Iobs.shape[0], tth.shape[0], dsp.shape[0], hkl.shape[0]]) + ) for ii in prange(nref): II = Iobs[ii] @@ -571,28 +546,28 @@ def computespectrum_pvtch(uvw, g = hkl[ii] xs = xy_sf[ii] - pv = pvoight_wppf(uvw,p,xy, - xs,shkl,eta_mixing, - t,d,g, - tth_list) + pv = pvoight_wppf(uvw, p, xy, xs, shkl, eta_mixing, t, d, g, tth_list) spec += II * pv return spec -@numba_njit_if_available(cache=True, nogil=True, parallel=True) -def computespectrum_pvpink(alpha, - beta, - uvw, - p, - xy, - xy_sf, - shkl, - eta_mixing, - tth, - dsp, - hkl, - tth_list, - Iobs): + +@njit(cache=True, nogil=True, parallel=True) +def computespectrum_pvpink( + alpha, + beta, + uvw, + p, + xy, + xy_sf, + shkl, + eta_mixing, + tth, + dsp, + hkl, + tth_list, + Iobs, +): """ @author Saransh Singh, Lawrence Livermore National Lab @date 03/31/2021 SS 1.0 original @@ -603,9 +578,9 @@ def computespectrum_pvpink(alpha, """ spec = np.zeros(tth_list.shape) - nref = np.min(np.array([Iobs.shape[0], - tth.shape[0], - dsp.shape[0],hkl.shape[0]])) + nref = np.min( + np.array([Iobs.shape[0], tth.shape[0], dsp.shape[0], hkl.shape[0]]) + ) for ii in prange(nref): II = Iobs[ii] @@ -614,33 +589,34 @@ def computespectrum_pvpink(alpha, g = hkl[ii] xs = xy_sf[ii] - pv = pvoight_pink_beam(alpha,beta, - uvw,p,xy, - xs,shkl,eta_mixing, - t,d,g, - tth_list) + pv = pvoight_pink_beam( + alpha, beta, uvw, p, xy, xs, shkl, eta_mixing, t, d, g, tth_list + ) spec += II * pv return spec -@numba_njit_if_available(cache=True, nogil=True) -def calc_Iobs_pvfcj(uvw, - p, - xy, - xy_sf, - shkl, - eta_mixing, - HL, - SL, - xn, - wn, - tth, - dsp, - hkl, - tth_list, - Icalc, - spectrum_expt, - spectrum_sim): + +@njit(cache=True, nogil=True) +def calc_Iobs_pvfcj( + uvw, + p, + xy, + xy_sf, + shkl, + eta_mixing, + HL, + SL, + xn, + wn, + tth, + dsp, + hkl, + tth_list, + Icalc, + spectrum_expt, + spectrum_sim, +): """ @author Saransh Singh, Lawrence Livermore National Lab @date 03/31/2021 SS 1.0 original @@ -650,16 +626,16 @@ def calc_Iobs_pvfcj(uvw, the final intensities """ Iobs = np.zeros(tth.shape) - nref = np.min(np.array([Icalc.shape[0], - tth.shape[0], - dsp.shape[0],hkl.shape[0]])) + nref = np.min( + np.array([Icalc.shape[0], tth.shape[0], dsp.shape[0], hkl.shape[0]]) + ) - yo = spectrum_expt[:,1] - yc = spectrum_sim[:,1] - mask = yc != 0. 
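All three `computespectrum_*` kernels share the same reduction: one unit-area profile per reflection, scaled by its intensity `Iobs[ii]` and accumulated onto the common `tth_list` grid under `prange`. A self-contained sketch of that pattern, with a toy Gaussian standing in for `pvfcj`/`pvoight_wppf`/`pvoight_pink_beam` (the stand-in profile and the FWHM-to-sigma conversion are assumptions):

```
import numpy as np
from numba import njit, prange

@njit(cache=True, nogil=True, parallel=True)
def compute_spectrum(tth, Iobs, tth_list, fwhm):
    spec = np.zeros(tth_list.shape)
    nref = min(tth.shape[0], Iobs.shape[0])
    for ii in prange(nref):
        # toy Gaussian stand-in for the real profile kernels
        sig = fwhm / 2.35482
        pv = np.exp(-0.5 * ((tth_list - tth[ii]) / sig) ** 2)
        pv /= np.trapz(pv, tth_list)  # unit area, like the real profiles
        spec += Iobs[ii] * pv         # numba recognizes this reduction
    return spec

spec = compute_spectrum(np.array([10.0, 12.5]),
                        np.array([100.0, 40.0]),
                        np.linspace(8.0, 15.0, 701), 0.1)
```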
+ yo = spectrum_expt[:, 1] + yc = spectrum_sim[:, 1] + mask = yc != 0.0 yo = yo[mask] yc = yc[mask] - tth_list_mask = spectrum_expt[:,0] + tth_list_mask = spectrum_expt[:, 0] tth_list_mask = tth_list_mask[mask] for ii in np.arange(nref): @@ -669,33 +645,47 @@ def calc_Iobs_pvfcj(uvw, g = hkl[ii] xs = xy_sf[ii] - pv = pvfcj(uvw,p,xy,xs, - shkl,eta_mixing, - t,d,g, - tth_list_mask, - HL,SL,xn,wn) + pv = pvfcj( + uvw, + p, + xy, + xs, + shkl, + eta_mixing, + t, + d, + g, + tth_list_mask, + HL, + SL, + xn, + wn, + ) y = Ic * pv y = y[mask] - Iobs[ii] = np.trapz(yo*y/yc, tth_list_mask) + Iobs[ii] = np.trapz(yo * y / yc, tth_list_mask) return Iobs -@numba_njit_if_available(cache=True, nogil=True) -def calc_Iobs_pvtch(uvw, - p, - xy, - xy_sf, - shkl, - eta_mixing, - tth, - dsp, - hkl, - tth_list, - Icalc, - spectrum_expt, - spectrum_sim): + +@njit(cache=True, nogil=True) +def calc_Iobs_pvtch( + uvw, + p, + xy, + xy_sf, + shkl, + eta_mixing, + tth, + dsp, + hkl, + tth_list, + Icalc, + spectrum_expt, + spectrum_sim, +): """ @author Saransh Singh, Lawrence Livermore National Lab @date 03/31/2021 SS 1.0 original @@ -705,16 +695,16 @@ def calc_Iobs_pvtch(uvw, the final intensities """ Iobs = np.zeros(tth.shape) - nref = np.min(np.array([Icalc.shape[0], - tth.shape[0], - dsp.shape[0],hkl.shape[0]])) + nref = np.min( + np.array([Icalc.shape[0], tth.shape[0], dsp.shape[0], hkl.shape[0]]) + ) - yo = spectrum_expt[:,1] - yc = spectrum_sim[:,1] - mask = yc != 0. + yo = spectrum_expt[:, 1] + yc = spectrum_sim[:, 1] + mask = yc != 0.0 yo = yo[mask] yc = yc[mask] - tth_list_mask = spectrum_expt[:,0] + tth_list_mask = spectrum_expt[:, 0] tth_list_mask = tth_list_mask[mask] for ii in np.arange(nref): @@ -724,34 +714,36 @@ def calc_Iobs_pvtch(uvw, g = hkl[ii] xs = xy_sf[ii] - pv = pvoight_wppf(uvw,p,xy, - xs,shkl,eta_mixing, - t,d,g, - tth_list_mask) + pv = pvoight_wppf( + uvw, p, xy, xs, shkl, eta_mixing, t, d, g, tth_list_mask + ) y = Ic * pv y = y[mask] - Iobs[ii] = np.trapz(yo*y/yc, tth_list_mask) + Iobs[ii] = np.trapz(yo * y / yc, tth_list_mask) return Iobs -@numba_njit_if_available(cache=True, nogil=True) -def calc_Iobs_pvpink(alpha, - beta, - uvw, - p, - xy, - xy_sf, - shkl, - eta_mixing, - tth, - dsp, - hkl, - tth_list, - Icalc, - spectrum_expt, - spectrum_sim): + +@njit(cache=True, nogil=True) +def calc_Iobs_pvpink( + alpha, + beta, + uvw, + p, + xy, + xy_sf, + shkl, + eta_mixing, + tth, + dsp, + hkl, + tth_list, + Icalc, + spectrum_expt, + spectrum_sim, +): """ @author Saransh Singh, Lawrence Livermore National Lab @date 03/31/2021 SS 1.0 original @@ -761,16 +753,16 @@ def calc_Iobs_pvpink(alpha, the final intensities """ Iobs = np.zeros(tth.shape) - nref = np.min(np.array([Icalc.shape[0], - tth.shape[0], - dsp.shape[0],hkl.shape[0]])) + nref = np.min( + np.array([Icalc.shape[0], tth.shape[0], dsp.shape[0], hkl.shape[0]]) + ) - yo = spectrum_expt[:,1] - yc = spectrum_sim[:,1] - mask = yc != 0. 
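The `calc_Iobs_*` kernels all end in the same Rietveld-style partitioning: the calculated profile of each reflection is weighted by the observed-to-simulated ratio `yo/yc` and integrated (after masking out bins where `yc == 0`, as above). A one-reflection sketch with toy arrays (all values assumed):

```
import numpy as np

def iobs_one_reflection(Ic, pv, yo, yc, tth_list):
    # apportion observed counts to this reflection:
    # integrate (calculated profile) * (observed / simulated)
    y = Ic * pv
    return np.trapz(yo * y / yc, tth_list)

tth_list = np.linspace(9.5, 10.5, 201)
pv = np.exp(-0.5 * ((tth_list - 10.0) / 0.04) ** 2)
pv /= np.trapz(pv, tth_list)   # unit-area profile
yc = 50.0 * pv + 1.0           # toy simulated spectrum
yo = 1.1 * yc                  # toy experiment, uniformly 10% hotter
print(iobs_one_reflection(50.0, pv, yo, yc, tth_list))  # ~55.0
```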
+ yo = spectrum_expt[:, 1] + yc = spectrum_sim[:, 1] + mask = yc != 0.0 yo = yo[mask] yc = yc[mask] - tth_list_mask = spectrum_expt[:,0] + tth_list_mask = spectrum_expt[:, 0] tth_list_mask = tth_list_mask[mask] for ii in prange(nref): @@ -780,24 +772,31 @@ def calc_Iobs_pvpink(alpha, g = hkl[ii] xs = xy_sf[ii] - pv = pvoight_pink_beam(alpha, beta, - uvw,p,xy,xs, - shkl,eta_mixing, - t,d,g, - tth_list_mask) + pv = pvoight_pink_beam( + alpha, + beta, + uvw, + p, + xy, + xs, + shkl, + eta_mixing, + t, + d, + g, + tth_list_mask, + ) y = Ic * pv y = y[mask] - Iobs[ii] = np.trapz(yo*y/yc, tth_list_mask) + Iobs[ii] = np.trapz(yo * y / yc, tth_list_mask) return Iobs -@numba_njit_if_available(cache=True, nogil=True) -def calc_rwp(spectrum_sim, - spectrum_expt, - weights, - P): + +@njit(cache=True, nogil=True) +def calc_rwp(spectrum_sim, spectrum_expt, weights, P): """ @author Saransh Singh, Lawrence Livermore National Lab @date 03/31/2021 SS 1.0 original @@ -805,9 +804,9 @@ def calc_rwp(spectrum_sim, moved outside of the class to allow numba implementation P : number of independent parameters in fitting """ - err = weights[:,1]*(spectrum_sim[:,1] - spectrum_expt[:,1])**2 + err = weights[:, 1] * (spectrum_sim[:, 1] - spectrum_expt[:, 1]) ** 2 - weighted_expt = weights[:,1] * spectrum_expt[:,1] **2 + weighted_expt = weights[:, 1] * spectrum_expt[:, 1] ** 2 errvec = np.sqrt(err) @@ -816,9 +815,9 @@ def calc_rwp(spectrum_sim, den = np.sum(weighted_expt) """ standard Rwp i.e. weighted residual """ - if(den > 0.): - if(wss/den > 0.): - Rwp = np.sqrt(wss/den) + if den > 0.0: + if wss / den > 0.0: + Rwp = np.sqrt(wss / den) else: Rwp = np.inf else: @@ -827,17 +826,17 @@ def calc_rwp(spectrum_sim, """ number of observations to fit i.e. number of data points """ N = spectrum_sim.shape[0] - if den > 0.: - if (N-P)/den > 0: - Rexp = np.sqrt((N-P)/den) + if den > 0.0: + if (N - P) / den > 0: + Rexp = np.sqrt((N - P) / den) else: Rexp = 0.0 else: Rexp = np.inf # Rwp and goodness of fit parameters - if Rexp > 0.: - gofF = Rwp/Rexp + if Rexp > 0.0: + gofF = Rwp / Rexp else: gofF = np.inf diff --git a/hexrd/wppf/texture.py b/hexrd/wppf/texture.py index 6bf205990..33c5e9f07 100644 --- a/hexrd/wppf/texture.py +++ b/hexrd/wppf/texture.py @@ -1062,7 +1062,6 @@ def initialize_crystal_dir(self, if samplingtype.lower() == "fem": msg = "sampling type FEM not implemented yet." raise ValueError(msg) - pass @property def sample_dir(self): diff --git a/hexrd/wppf/xtal.py b/hexrd/wppf/xtal.py index f56cdc744..1cd6892ce 100644 --- a/hexrd/wppf/xtal.py +++ b/hexrd/wppf/xtal.py @@ -1,35 +1,31 @@ import numpy as np -from hexrd.utils.decorators import numba_njit_if_available +from numba import njit, prange + from hexrd import constants -import numba -if constants.USE_NUMBA: - from numba import prange -else: - prange = range -@numba_njit_if_available(cache=True, nogil=True) +@njit(cache=True, nogil=True) def _calc_dspacing(rmt, hkls): nhkls = hkls.shape[0] dsp = np.zeros(hkls.shape[0]) for ii in np.arange(nhkls): - g = hkls[ii,:] - dsp[ii] = 1.0/np.sqrt(np.dot(g, - np.dot(rmt, g))) + g = hkls[ii, :] + dsp[ii] = 1.0 / np.sqrt(np.dot(g, np.dot(rmt, g))) return dsp -@numba_njit_if_available(cache=True, nogil=True) + +@njit(cache=True, nogil=True) def _get_tth(dsp, wavelength): nn = dsp.shape[0] tth = np.zeros(dsp.shape[0]) wavelength_allowed_hkls = np.zeros(dsp.shape[0]) for ii in np.arange(nn): d = dsp[ii] - glen = 1./d - sth = glen*wavelength/2. - if(np.abs(sth) <= 1.0): - t = 2. 
* np.degrees(np.arcsin(sth)) + glen = 1.0 / d + sth = glen * wavelength / 2.0 + if np.abs(sth) <= 1.0: + t = 2.0 * np.degrees(np.arcsin(sth)) tth[ii] = t wavelength_allowed_hkls[ii] = 1 else: @@ -38,122 +34,129 @@ def _get_tth(dsp, wavelength): return tth, wavelength_allowed_hkls -@numba_njit_if_available(cache=True, nogil=True) -def _calcanomalousformfactor(atom_type, - wavelength, - frel, - f_anomalous_data, - f_anomalous_data_sizes): - f_anam = np.zeros(atom_type.shape,dtype=np.complex64) +@njit(cache=True, nogil=True) +def _calcanomalousformfactor( + atom_type, wavelength, frel, f_anomalous_data, f_anomalous_data_sizes +): + + f_anam = np.zeros(atom_type.shape, dtype=np.complex64) for i in range(atom_type.shape[0]): nd = f_anomalous_data_sizes[i] Z = atom_type[i] fr = frel[i] - f_data = f_anomalous_data[i,:,:] - xp = f_data[:nd,0] - yp = f_data[:nd,1] - ypp = f_data[:nd,2] + f_data = f_anomalous_data[i, :, :] + xp = f_data[:nd, 0] + yp = f_data[:nd, 1] + ypp = f_data[:nd, 2] - f1 = np.interp(wavelength,xp,yp) - f2 = np.interp(wavelength,xp,ypp) + f1 = np.interp(wavelength, xp, yp) + f2 = np.interp(wavelength, xp, ypp) - f_anam[i] = complex(f1+fr-Z,f2) + f_anam[i] = complex(f1 + fr - Z, f2) return f_anam -@numba_njit_if_available(cache=True, nogil=True) -def _calcxrayformfactor(wavelength, - s, + +@njit(cache=True, nogil=True) +def _calcxrayformfactor( + wavelength, + s, atom_type, - scatfac, - fNT, - frel, + scatfac, + fNT, + frel, f_anomalous_data, - f_anomalous_data_sizes): - - f_anomalous = _calcanomalousformfactor(atom_type, - wavelength, - frel, - f_anomalous_data, - f_anomalous_data_sizes) - ff = np.zeros(atom_type.shape,dtype=np.complex64) + f_anomalous_data_sizes, +): + + f_anomalous = _calcanomalousformfactor( + atom_type, wavelength, frel, f_anomalous_data, f_anomalous_data_sizes + ) + ff = np.zeros(atom_type.shape, dtype=np.complex64) for ii in range(atom_type.shape[0]): - sfact = scatfac[ii,:] + sfact = scatfac[ii, :] fe = sfact[5] for jj in range(5): - fe += sfact[jj] * np.exp(-sfact[jj+6]*s) + fe += sfact[jj] * np.exp(-sfact[jj + 6] * s) - ff[ii] = fe+fNT[ii]+f_anomalous[ii] + ff[ii] = fe + fNT[ii] + f_anomalous[ii] return ff -@numba_njit_if_available(cache=True, nogil=True, parallel=True) -def _calcxrsf(hkls, - nref, - multiplicity, - w_int, - wavelength, - rmt, - atom_type, - atom_ntype, - betaij, - occ, - asym_pos_arr, - numat, - scatfac, - fNT, - frel, - f_anomalous_data, - f_anomalous_data_sizes): - - struct_factors = np.zeros(multiplicity.shape, - dtype=np.float64) - - struct_factors_raw = np.zeros(multiplicity.shape, - dtype=np.float64) +@njit(cache=True, nogil=True, parallel=True) +def _calcxrsf( + hkls, + nref, + multiplicity, + w_int, + wavelength, + rmt, + atom_type, + atom_ntype, + betaij, + occ, + asym_pos_arr, + numat, + scatfac, + fNT, + frel, + f_anomalous_data, + f_anomalous_data_sizes, +): + + struct_factors = np.zeros(multiplicity.shape, dtype=np.float64) + + struct_factors_raw = np.zeros(multiplicity.shape, dtype=np.float64) for ii in prange(nref): - g = hkls[ii,:] + g = hkls[ii, :] mm = multiplicity[ii] - glen = np.dot(g,np.dot(rmt,g)) - s = 0.25 * glen * 1E-2 - sf = complex(0., 0.) 
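`_get_tth` above is the standard Bragg-law conversion, `sin(theta) = lambda / (2 d)`, with reflections flagged as unreachable when `|sin(theta)| > 1` at the given wavelength. The same logic restated as a vectorized NumPy function (the njit loop is dropped; the d-spacings and wavelength below are illustrative):

```
import numpy as np

def dspacing_to_tth(dsp, wavelength):
    sth = wavelength / (2.0 * dsp)
    allowed = np.abs(sth) <= 1.0          # reachable at this wavelength?
    tth = np.zeros_like(dsp)
    tth[allowed] = 2.0 * np.degrees(np.arcsin(sth[allowed]))
    return tth, allowed

# d = 0.5 angstrom is unreachable with 1.54 angstrom radiation
tth, ok = dspacing_to_tth(np.array([2.03, 1.02, 0.5]), 1.54)
```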
- formfact = _calcxrayformfactor(wavelength, - s, - atom_type, - scatfac, - fNT, - frel, - f_anomalous_data, - f_anomalous_data_sizes) + glen = np.dot(g, np.dot(rmt, g)) + s = 0.25 * glen * 1e-2 + sf = complex(0.0, 0.0) + formfact = _calcxrayformfactor( + wavelength, + s, + atom_type, + scatfac, + fNT, + frel, + f_anomalous_data, + f_anomalous_data_sizes, + ) for jj in range(atom_ntype): natom = numat[jj] - apos = asym_pos_arr[:natom,jj,:] + apos = asym_pos_arr[:natom, jj, :] if betaij.ndim > 1: - b = betaij[:,:,jj] - arg = b[0,0]*g[0]**2+\ - b[1,1]*g[1]**2+\ - b[2,2]*g[2]**2+\ - 2.0*(b[0,1]*g[0]*g[1]+\ - b[0,2]*g[0]*g[2]+\ - b[1,2]*g[1]*g[2]) + b = betaij[:, :, jj] + arg = ( + b[0, 0] * g[0] ** 2 + + b[1, 1] * g[1] ** 2 + + b[2, 2] * g[2] ** 2 + + 2.0 + * ( + b[0, 1] * g[0] * g[1] + + b[0, 2] * g[0] * g[2] + + b[1, 2] * g[1] * g[2] + ) + ) arg = -arg else: - arg = -8.0*np.pi**2 * betaij[jj]*s + arg = -8.0 * np.pi**2 * betaij[jj] * s T = np.exp(arg) - ff = formfact[jj]*occ[jj]*T + ff = formfact[jj] * occ[jj] * T for kk in range(natom): - r = apos[kk,:] - arg = 2.0 * np.pi * np.sum(g*r) - sf = sf + ff*complex(np.cos(arg), -np.sin(arg)) + r = apos[kk, :] + arg = 2.0 * np.pi * np.sum(g * r) + sf = sf + ff * complex(np.cos(arg), -np.sin(arg)) - struct_factors_raw[ii] = np.abs(sf)**2 - struct_factors[ii] = w_int*mm*struct_factors_raw[ii] + struct_factors_raw[ii] = np.abs(sf) ** 2 + struct_factors[ii] = w_int * mm * struct_factors_raw[ii] # ma = struct_factors.max() # struct_factors = 100.0*struct_factors/ma @@ -161,81 +164,77 @@ def _calcxrsf(hkls, # struct_factors_raw = 100.0*struct_factors_raw/ma return struct_factors, struct_factors_raw -@numba_njit_if_available(cache=True, nogil=True) -def _calc_x_factor(K, - v_unitcell, - wavelength, - f_sqr, - D): - return f_sqr*(K*wavelength*D/v_unitcell)**2 - -@numba_njit_if_available(cache=True, nogil=True) -def _calc_bragg_factor(x,tth): - stth = np.sin(np.radians(tth*0.5))**2 - return stth/np.sqrt(1.+x) - - -@numba_njit_if_available(cache=True, nogil=True) -def _calc_laue_factor(x,tth): - ctth = np.cos(np.radians(tth*0.5))**2 - if x <= 1.: - El = (1.-0.5*x+0.25*x**2-(5./48.)*x**3+(7./192.)*x**4) - elif x > 1.: - El = (2./np.pi/x)**2 * (1.-(1/8./x)-(3./128.)*(1./x)**2-\ - (15./1024.)*(1/x)**3) - return El*ctth - -@numba_njit_if_available(cache=True, nogil=True, parallel=True) -def _calc_extinction_factor(hkls, - tth, - v_unitcell, - wavelength, - f_sqr, - K, - D): - nref = np.min(np.array([hkls.shape[0],\ - tth.shape[0]])) + +@njit(cache=True, nogil=True) +def _calc_x_factor(K, v_unitcell, wavelength, f_sqr, D): + return f_sqr * (K * wavelength * D / v_unitcell) ** 2 + + +@njit(cache=True, nogil=True) +def _calc_bragg_factor(x, tth): + stth = np.sin(np.radians(tth * 0.5)) ** 2 + return stth / np.sqrt(1.0 + x) + + +@njit(cache=True, nogil=True) +def _calc_laue_factor(x, tth): + ctth = np.cos(np.radians(tth * 0.5)) ** 2 + if x <= 1.0: + El = ( + 1.0 + - 0.5 * x + + 0.25 * x**2 + - (5.0 / 48.0) * x**3 + + (7.0 / 192.0) * x**4 + ) + elif x > 1.0: + El = (2.0 / np.pi / x) ** 2 * ( + 1.0 + - (1 / 8.0 / x) + - (3.0 / 128.0) * (1.0 / x) ** 2 + - (15.0 / 1024.0) * (1 / x) ** 3 + ) + return El * ctth + + +@njit(cache=True, nogil=True, parallel=True) +def _calc_extinction_factor(hkls, tth, v_unitcell, wavelength, f_sqr, K, D): + nref = np.min(np.array([hkls.shape[0], tth.shape[0]])) extinction = np.zeros(nref) for ii in prange(nref): - fs = f_sqr[ii] - t = tth[ii] - x = _calc_x_factor(K,v_unitcell, - wavelength, - fs,D) - extinction[ii] = 
_calc_bragg_factor(x,t)+\ - _calc_laue_factor(x,t) + fs = f_sqr[ii] + t = tth[ii] + x = _calc_x_factor(K, v_unitcell, wavelength, fs, D) + extinction[ii] = _calc_bragg_factor(x, t) + _calc_laue_factor(x, t) return extinction -@numba_njit_if_available(cache=True, nogil=True, parallel=True) -def _calc_absorption_factor(abs_fact, - tth, - phi, - wavelength): + +@njit(cache=True, nogil=True, parallel=True) +def _calc_absorption_factor(abs_fact, tth, phi, wavelength): nref = tth.shape[0] absorption = np.zeros(nref) phir = np.radians(phi) - abl = -abs_fact*wavelength + abl = -abs_fact * wavelength for ii in prange(nref): - t = np.radians(tth[ii])*0.5 + t = np.radians(tth[ii]) * 0.5 - if(np.abs(phir) > 1e-3): - c1 = np.cos(t+phir) - c2 = np.cos(t-phir) + if np.abs(phir) > 1e-3: + c1 = np.cos(t + phir) + c2 = np.cos(t - phir) - f1 = np.exp(abl/c1) - f2 = np.exp(abl/c2) - if np.abs(c2) > 1e-3: - f3 = abl*(1. - c1/c2) - else: - f3 = np.inf + f1 = np.exp(abl / c1) + f2 = np.exp(abl / c2) + if np.abs(c2) > 1e-3: + f3 = abl * (1.0 - c1 / c2) + else: + f3 = np.inf - absorption[ii] = (f1-f2)/f3 - else: - c = np.cos(t) - absorption[ii] = np.exp(abl/c) + absorption[ii] = (f1 - f2) / f3 + else: + c = np.cos(t) + absorption[ii] = np.exp(abl / c) return absorption - \ No newline at end of file diff --git a/hexrd/xrdutil/nf-hedm/kerberos_notes.txt b/hexrd/xrdutil/nf-hedm/kerberos_notes.txt deleted file mode 100644 index 0cfdd8db9..000000000 --- a/hexrd/xrdutil/nf-hedm/kerberos_notes.txt +++ /dev/null @@ -1,36 +0,0 @@ -For Farm users, we have recommended that a Kerberos key-tab file be used instead of ssh keys, so that you don't need to manage a known_hosts file entry for every host you need to access in the Farm. - - -To make a Kerberos keytab file, using ktutil, please do the following (enter your password, when prompted): - -``` -mkdir /home/$USER/etc -ktutil -addent -password -p $USER@CLASSE.CORNELL.EDU -k KVNO -e aes256-cts-hmac-sha1-96 -wkt /home/$USER/etc/$USER-keytab -quit -``` - -After creating the $USER-keytab file you can see what's in the file by typing: - -``` -klist -t -e -k ~/etc/$USER-keytab -``` - -The output should be something like: - -``` -[amd275@lnx7179 ~]$ klist -t -e -k ~/etc/amd275-keytab - -Keytab name: FILE:etc/amd275-keytab -KVNO Timestamp Principal ----- ------------------- ------------------------------------------------------ - 0 07/11/2019 13:37:04 amd275@CLASSE.CORNELL.EDU (aes256-cts-hmac-sha1-96) -``` - -So then just add the following to your job script (after all the SGE "#$" directive statements) - -``` -# Enable Kerberos ticket: -/usr/bin/kinit -k -t /home/$USER/etc/$USER-keytab $USER -``` diff --git a/hexrd/xrdutil/nf-hedm/mpi_example.sh b/hexrd/xrdutil/nf-hedm/mpi_example.sh deleted file mode 100644 index a31a13c99..000000000 --- a/hexrd/xrdutil/nf-hedm/mpi_example.sh +++ /dev/null @@ -1,20 +0,0 @@ -#$ -q chess_fast.q -#$ -S /bin/bash -#$ -cwd -#$ -N mpi-test -#$ -pe sge_pe_rr 224 -#$ -l h="lnx301|lnx302|lnx303|lnx304" - -# Need to set up kerberos keytab to use this -/usr/bin/kinit -k -t /home/$USER/etc/$USER-keytab $USER - -echo "Hostname: $(hostname)" - -# Load mpi -module load mpi - -# Activate conda environment -source /nfs/chess/user/pavery/virtualenvs/conda/bin/activate -conda activate hexrd - -time mpiexec -npersocket 1 python nf-HEDM_test.py --check check.pkl --chunk-size 100 --ncpus 28 diff --git a/hexrd/xrdutil/nf-hedm/nf-HEDM_test.py b/hexrd/xrdutil/nf-hedm/nf-HEDM_test.py deleted file mode 100644 index 4d35d2b1a..000000000 --- 
a/hexrd/xrdutil/nf-hedm/nf-HEDM_test.py +++ /dev/null @@ -1,1240 +0,0 @@ -""" -Refactor of simulate_nf so that an experiment is mocked up. - -Also trying to minimize imports -""" - -import os -import logging - -import numpy as np -import numba -import yaml -import argparse -import timeit -import contextlib -import multiprocessing -import tempfile -import shutil -import socket - -# import of hexrd modules -import hexrd -from hexrd import constants -from hexrd import instrument -from hexrd import material -from hexrd import rotations -from hexrd.transforms import xfcapi -from hexrd import valunits -from hexrd import xrdutil - -from skimage.morphology import dilation as ski_dilation - -hostname = socket.gethostname() - -USE_MPI = False -rank = 0 -try: - from mpi4py import MPI - comm = MPI.COMM_WORLD - world_size = comm.Get_size() - rank = comm.Get_rank() - USE_MPI = world_size > 1 - logging.info(f'{rank=} {world_size=} {hostname=}') -except ImportError: - logging.warning(f'mpi4py failed to load on {hostname=}. MPI is disabled.') - pass - - -beam = constants.beam_vec -Z_l = constants.lab_z -vInv_ref = constants.identity_6x1 - - -# ============================================================================== -# %% SOME SCAFFOLDING -# ============================================================================== - - -class ProcessController: - """This is a 'controller' that provides the necessary hooks to - track the results of the process as well as to provide clues of - the progress of the process""" - - def __init__(self, result_handler=None, progress_observer=None, ncpus=1, - chunk_size=100): - self.rh = result_handler - self.po = progress_observer - self.ncpus = ncpus - self.chunk_size = chunk_size - self.limits = {} - self.timing = [] - - # progress handling ------------------------------------------------------- - - def start(self, name, count): - self.po.start(name, count) - t = timeit.default_timer() - self.timing.append((name, count, t)) - - def finish(self, name): - t = timeit.default_timer() - self.po.finish() - entry = self.timing.pop() - assert name == entry[0] - total = t - entry[2] - logging.info("%s took %8.3fs (%8.6fs per item).", - entry[0], total, total/entry[1]) - - def update(self, value): - self.po.update(value) - - # result handler ---------------------------------------------------------- - - def handle_result(self, key, value): - logging.debug("handle_result (%(key)s)", locals()) - self.rh.handle_result(key, value) - - # value limitting --------------------------------------------------------- - def set_limit(self, key, limit_function): - if key in self.limits: - logging.warn("Overwritting limit funtion for '%(key)s'", locals()) - - self.limits[key] = limit_function - - def limit(self, key, value): - try: - value = self.limits[key](value) - except KeyError: - pass - except Exception: - logging.warn("Could not apply limit to '%(key)s'", locals()) - - return value - - # configuration ---------------------------------------------------------- - - def get_process_count(self): - return self.ncpus - - def get_chunk_size(self): - return self.chunk_size - - -def null_progress_observer(): - class NullProgressObserver: - def start(self, name, count): - pass - - def update(self, value): - pass - - def finish(self): - pass - - return NullProgressObserver() - - -def progressbar_progress_observer(): - - class ProgressBarProgressObserver: - def start(self, name, count): - from progressbar import ProgressBar, Percentage, Bar - - self.pbar = ProgressBar(widgets=[name, Percentage(), 
Bar()], - maxval=count) - self.pbar.start() - - def update(self, value): - self.pbar.update(value) - - def finish(self): - self.pbar.finish() - - return ProgressBarProgressObserver() - - -def forgetful_result_handler(): - class ForgetfulResultHandler: - def handle_result(self, key, value): - pass # do nothing - - return ForgetfulResultHandler() - - -def saving_result_handler(filename): - """returns a result handler that saves the resulting arrays into a file - with name filename""" - class SavingResultHandler: - def __init__(self, file_name): - self.filename = file_name - self.arrays = {} - - def handle_result(self, key, value): - self.arrays[key] = value - - def __del__(self): - logging.debug("Writing arrays in %(filename)s", self.__dict__) - try: - np.savez_compressed(open(self.filename, "wb"), **self.arrays) - except IOError: - logging.error("Failed to write %(filename)s", self.__dict__) - - return SavingResultHandler(filename) - - -def checking_result_handler(filename): - """returns a return handler that checks the results against a - reference file. - - The Check will consider a FAIL either a result not present in the - reference file (saved as a numpy savez or savez_compressed) or a - result that differs. It will consider a PARTIAL PASS if the - reference file has a shorter result, but the existing results - match. A FULL PASS will happen when all existing results match - - """ - class CheckingResultHandler: - def __init__(self, reference_file): - """Checks the result against those save in 'reference_file'""" - logging.info("Loading reference results from '%s'", reference_file) - self.reference_results = np.load(open(reference_file, 'rb')) - - def handle_result(self, key, value): - if key in ['experiment', 'image_stack']: - return # ignore these - - try: - reference = self.reference_results[key] - except KeyError as e: - logging.warning("%(key)s: %(e)s", locals()) - reference = None - - if reference is None: - msg = "'{0}': No reference result." - logging.warn(msg.format(key)) - - try: - if key == "confidence": - reference = reference.T - value = value.T - - check_len = min(len(reference), len(value)) - test_passed = np.allclose(value[:check_len], - reference[:check_len]) - - if not test_passed: - msg = "'{0}': FAIL" - logging.warn(msg.format(key)) - lvl = logging.WARN - elif len(value) > check_len: - msg = "'{0}': PARTIAL PASS" - lvl = logging.WARN - else: - msg = "'{0}': FULL PASS" - lvl = logging.INFO - logging.log(lvl, msg.format(key)) - except Exception as e: - msg = "%(key)s: Failure trying to check the results.\n%(e)s" - logging.error(msg, locals()) - - return CheckingResultHandler(filename) - - -# ============================================================================== -# %% SETUP FUNCTION -# ============================================================================== -def mockup_experiment(): - # user options - # each grain is provided in the form of a quaternion. - - # The following array contains the quaternions for the array. Note that the - # quaternions are in the columns, with the first row (row 0) being the real - # part w. 
We assume that we are dealing with unit quaternions - - quats = np.array([[0.91836393, 0.90869942], - [0.33952917, 0.18348350], - [0.17216207, 0.10095837], - [0.10811041, 0.36111851]]) - - n_grains = quats.shape[-1] # last dimension provides the number of grains - phis = 2.*np.arccos(quats[0, :]) # phis are the angles for the quaternion - # ns contains the rotation axis as an unit vector - ns = hexrd.matrixutil.unitVector(quats[1:, :]) - exp_maps = np.array([phis[i]*ns[:, i] for i in range(n_grains)]) - rMat_c = rotations.rotMatOfQuat(quats) - - cvec = np.arange(-25, 26) - X, Y, Z = np.meshgrid(cvec, cvec, cvec) - - crd0 = 1e-3*np.vstack([X.flatten(), Y.flatten(), Z.flatten()]).T - crd1 = crd0 + np.r_[0.100, 0.100, 0] - crds = np.array([crd0, crd1]) - - # make grain parameters - grain_params = [] - for i in range(n_grains): - for j in range(len(crd0)): - grain_params.append( - np.hstack([exp_maps[i, :], crds[i][j, :], vInv_ref]) - ) - - # scan range and period - ome_period = (0, 2*np.pi) - ome_range = [ome_period, ] - ome_step = np.radians(1.) - nframes = 0 - for i in range(len(ome_range)): - nframes += int((ome_range[i][1]-ome_range[i][0])/ome_step) - - ome_edges = np.arange(nframes+1)*ome_step - - # instrument - with open('./retiga.yml', 'r') as fildes: - instr = instrument.HEDMInstrument(yaml.safe_load(fildes)) - panel = next(iter(instr.detectors.values())) # !!! there is only 1 - - # tranform paramters - # Sample - chi = instr.chi - tVec_s = instr.tvec - # Detector - rMat_d = panel.rmat - tilt_angles_xyzp = np.asarray(rotations.angles_from_rmat_xyz(rMat_d)) - tVec_d = panel.tvec - - # pixels - row_ps = panel.pixel_size_row - col_ps = panel.pixel_size_col - pixel_size = (row_ps, col_ps) - nrows = panel.rows - ncols = panel.cols - - # panel dimensions - panel_dims = [tuple(panel.corner_ll), - tuple(panel.corner_ur)] - - x_col_edges = panel.col_edge_vec - y_row_edges = panel.row_edge_vec - rx, ry = np.meshgrid(x_col_edges, y_row_edges) - - max_pixel_tth = instrument.max_tth(instr) - - detector_params = np.hstack([tilt_angles_xyzp, tVec_d, chi, tVec_s]) - distortion = panel.distortion # !!! must be None for now - - # a different parametrization for the sensor - # (makes for faster quantization) - base = np.array([x_col_edges[0], - y_row_edges[0], - ome_edges[0]]) - deltas = np.array([x_col_edges[1] - x_col_edges[0], - y_row_edges[1] - y_row_edges[0], - ome_edges[1] - ome_edges[0]]) - inv_deltas = 1.0/deltas - clip_vals = np.array([ncols, nrows]) - - # dilation - max_diameter = np.sqrt(3)*0.005 - row_dilation = int(np.ceil(0.5 * max_diameter/row_ps)) - col_dilation = int(np.ceil(0.5 * max_diameter/col_ps)) - - # crystallography data - beam_energy = valunits.valWUnit("beam_energy", "energy", instr.beam_energy, "keV") - beam_wavelength = constants.keVToAngstrom(beam_energy.getVal('keV')) - dmin = valunits.valWUnit("dmin", "length", - 0.5*beam_wavelength/np.sin(0.5*max_pixel_tth), - "angstrom") - - gold = material.Material() - gold.latticeParameters = [4.0782] - gold.dmin = dmin - gold.beamEnergy = beam_energy - gold.planeData.exclusions = None - gold.planeData.tThMax = max_pixel_tth # note this comes detector - - ns = argparse.Namespace() - # grains related information - ns.n_grains = n_grains # this can be derived from other values... 
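The quaternion bookkeeping in `mockup_experiment` is compact enough to restate on its own: with unit quaternions stored column-wise (scalar part `w` in row 0), the rotation angle is `phi = 2*arccos(w)`, the axis is the normalized vector part, and the exponential map is `phi * axis`. A standalone sketch using the same quaternions as above, with `np.linalg.norm` standing in for `hexrd.matrixutil.unitVector`:

```
import numpy as np

quats = np.array([[0.91836393, 0.90869942],
                  [0.33952917, 0.18348350],
                  [0.17216207, 0.10095837],
                  [0.10811041, 0.36111851]])

phis = 2.0 * np.arccos(quats[0, :])                       # rotation angles
ns = quats[1:, :] / np.linalg.norm(quats[1:, :], axis=0)  # unit axes
exp_maps = np.array([phis[i] * ns[:, i] for i in range(quats.shape[1])])
```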
- ns.rMat_c = rMat_c # n_grains rotation matrices (one per grain) - ns.exp_maps = exp_maps # n_grains exp_maps (one per grain) - - ns.plane_data = gold.planeData - ns.detector_params = detector_params - ns.pixel_size = pixel_size - ns.ome_range = ome_range - ns.ome_period = ome_period - ns.x_col_edges = x_col_edges - ns.y_row_edges = y_row_edges - ns.ome_edges = ome_edges - ns.ncols = ncols - ns.nrows = nrows - ns.nframes = nframes # used only in simulate... - ns.rMat_d = rMat_d - ns.tVec_d = tVec_d - ns.chi = chi # note this is used to compute S... why is it needed? - ns.tVec_s = tVec_s - ns.rMat_c = rMat_c - ns.row_dilation = row_dilation - ns.col_dilation = col_dilation - ns.distortion = distortion - ns.panel_dims = panel_dims # used only in simulate... - ns.base = base - ns.inv_deltas = inv_deltas - ns.clip_vals = clip_vals - - return grain_params, ns - - -# ============================================================================= -# %% OPTIMIZED BITS -# ============================================================================= - -# Some basic 3d algebra ======================================================= -@numba.njit(nogil=True, cache=True) -def _v3_dot(a, b): - return a[0]*b[0] + a[1]*b[1] + a[2]*b[2] - - -@numba.njit(nogil=True, cache=True) -def _m33_v3_multiply(m, v, dst): - v0 = v[0] - v1 = v[1] - v2 = v[2] - dst[0] = m[0, 0]*v0 + m[0, 1]*v1 + m[0, 2]*v2 - dst[1] = m[1, 0]*v0 + m[1, 1]*v1 + m[1, 2]*v2 - dst[2] = m[2, 0]*v0 + m[2, 1]*v1 + m[2, 2]*v2 - - return dst - - -@numba.njit(nogil=True, cache=True) -def _v3_normalized(src, dst): - v0 = src[0] - v1 = src[1] - v2 = src[2] - sqr_norm = v0*v0 + v1*v1 + v2*v2 - inv_norm = 1.0 if sqr_norm == 0.0 else 1./np.sqrt(sqr_norm) - - dst[0] = v0 * inv_norm - dst[1] = v1 * inv_norm - dst[2] = v2 * inv_norm - - return dst - - -@numba.njit(nogil=True, cache=True) -def _make_binary_rot_mat(src, dst): - v0 = src[0] - v1 = src[1] - v2 = src[2] - - dst[0, 0] = 2.0*v0*v0 - 1.0 - dst[0, 1] = 2.0*v0*v1 - dst[0, 2] = 2.0*v0*v2 - dst[1, 0] = 2.0*v1*v0 - dst[1, 1] = 2.0*v1*v1 - 1.0 - dst[1, 2] = 2.0*v1*v2 - dst[2, 0] = 2.0*v2*v0 - dst[2, 1] = 2.0*v2*v1 - dst[2, 2] = 2.0*v2*v2 - 1.0 - - return dst - - -# code transcribed in numba from transforms module ============================ - -# This is equivalent to the transform module anglesToGVec, but written in -# numba. This should end in a module to share with other scripts -@numba.njit(nogil=True, cache=True) -def _anglesToGVec(angs, rMat_ss, rMat_c): - """From a set of angles return them in crystal space""" - result = np.empty_like(angs) - for i in range(len(angs)): - cx = np.cos(0.5*angs[i, 0]) - sx = np.sin(0.5*angs[i, 0]) - cy = np.cos(angs[i, 1]) - sy = np.sin(angs[i, 1]) - g0 = cx*cy - g1 = cx*sy - g2 = sx - - # with g being [cx*xy, cx*sy, sx] - # result = dot(rMat_c, dot(rMat_ss[i], g)) - t0_0 = \ - rMat_ss[i, 0, 0]*g0 + rMat_ss[i, 1, 0]*g1 + rMat_ss[i, 2, 0]*g2 - t0_1 = \ - rMat_ss[i, 0, 1]*g0 + rMat_ss[i, 1, 1]*g1 + rMat_ss[i, 2, 1]*g2 - t0_2 = \ - rMat_ss[i, 0, 2]*g0 + rMat_ss[i, 1, 2]*g1 + rMat_ss[i, 2, 2]*g2 - - result[i, 0] = \ - rMat_c[0, 0]*t0_0 + rMat_c[1, 0]*t0_1 + rMat_c[2, 0]*t0_2 - result[i, 1] = \ - rMat_c[0, 1]*t0_0 + rMat_c[1, 1]*t0_1 + rMat_c[2, 1]*t0_2 - result[i, 2] = \ - rMat_c[0, 2]*t0_0 + rMat_c[1, 2]*t0_1 + rMat_c[2, 2]*t0_2 - - return result - - -# This is equivalent to the transform's module gvec_to_xy, -# but written in numba. 
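The small algebra kernels above (`_v3_dot`, `_m33_v3_multiply`, `_v3_normalized`, `_make_binary_rot_mat`) all write into a caller-supplied `dst` array, which keeps per-call allocations out of the hot loops that numba compiles. A standalone copy of one of them with a toy driver (the identity-matrix input is purely for illustration):

```
import numpy as np
from numba import njit

@njit(nogil=True, cache=True)
def m33_v3_multiply(m, v, dst):
    # 3x3 matrix times 3-vector, written into preallocated dst
    v0, v1, v2 = v[0], v[1], v[2]
    dst[0] = m[0, 0] * v0 + m[0, 1] * v1 + m[0, 2] * v2
    dst[1] = m[1, 0] * v0 + m[1, 1] * v1 + m[1, 2] * v2
    dst[2] = m[2, 0] * v0 + m[2, 1] * v1 + m[2, 2] * v2
    return dst

out = np.empty(3)
m33_v3_multiply(np.eye(3), np.array([1.0, 2.0, 3.0]), out)
```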
-# As of now, it is not a good replacement as efficient allocation of the -# temporary arrays is not competitive with the stack allocation using in -# the C version of the code (WiP) - -# tC varies per coord -# gvec_cs, rSm varies per grain -# -# gvec_cs -@numba.njit(nogil=True, cache=True) -def _gvec_to_detector_array(vG_sn, rD, rSn, rC, tD, tS, tC): - """ beamVec is the beam vector: (0, 0, -1) in this case """ - ztol = xrdutil.epsf - p3_l = np.empty((3,)) - tmp_vec = np.empty((3,)) - vG_l = np.empty((3,)) - tD_l = np.empty((3,)) - norm_vG_s = np.empty((3,)) - norm_beam = np.empty((3,)) - tZ_l = np.empty((3,)) - brMat = np.empty((3, 3)) - result = np.empty((len(rSn), 2)) - - _v3_normalized(beam, norm_beam) - _m33_v3_multiply(rD, Z_l, tZ_l) - - for i in range(len(rSn)): - _m33_v3_multiply(rSn[i], tC, p3_l) - p3_l += tS - p3_minus_p1_l = tD - p3_l - - num = _v3_dot(tZ_l, p3_minus_p1_l) - _v3_normalized(vG_sn[i], norm_vG_s) - - _m33_v3_multiply(rC, norm_vG_s, tmp_vec) - _m33_v3_multiply(rSn[i], tmp_vec, vG_l) - - bDot = -_v3_dot(norm_beam, vG_l) - - if bDot < ztol or bDot > 1.0 - ztol: - result[i, 0] = np.nan - result[i, 1] = np.nan - continue - - _make_binary_rot_mat(vG_l, brMat) - _m33_v3_multiply(brMat, norm_beam, tD_l) - denom = _v3_dot(tZ_l, tD_l) - - if denom < ztol: - result[i, 0] = np.nan - result[i, 1] = np.nan - continue - - u = num/denom - tmp_res = u*tD_l - p3_minus_p1_l - result[i, 0] = _v3_dot(tmp_res, rD[:, 0]) - result[i, 1] = _v3_dot(tmp_res, rD[:, 1]) - - return result - - -@numba.njit(nogil=True, cache=True) -def _quant_and_clip_confidence(coords, angles, image, - base, inv_deltas, clip_vals): - """quantize and clip the parametric coordinates in coords + angles - - coords - (..., 2) array: input 2d parametric coordinates - angles - (...) array: additional dimension for coordinates - base - (3,) array: base value for quantization (for each dimension) - inv_deltas - (3,) array: inverse of the quantum size (for each dimension) - clip_vals - (2,) array: clip size (only applied to coords dimensions) - - clipping is performed on ranges [0, clip_vals[0]] for x and - [0, clip_vals[1]] for y - - returns an array with the quantized coordinates, with coordinates - falling outside the clip zone filtered out. 
- - """ - count = len(coords) - - in_sensor = 0 - matches = 0 - for i in range(count): - xf = coords[i, 0] - yf = coords[i, 1] - - xf = np.floor((xf - base[0]) * inv_deltas[0]) - if not xf >= 0.0: - continue - if not xf < clip_vals[0]: - continue - - yf = np.floor((yf - base[1]) * inv_deltas[1]) - - if not yf >= 0.0: - continue - if not yf < clip_vals[1]: - continue - - zf = np.floor((angles[i] - base[2]) * inv_deltas[2]) - - in_sensor += 1 - - x, y, z = int(xf), int(yf), int(zf) - - x_byte = x // 8 - x_off = 7 - (x % 8) - if image[z, y, x_byte] & (1 << x_off): - matches += 1 - - return 0 if in_sensor == 0 else float(matches)/float(in_sensor) - - -# ============================================================================== -# %% DIFFRACTION SIMULATION -# ============================================================================== - -def get_simulate_diffractions(grain_params, experiment, - cache_file='gold_cubes.npy', - controller=None): - """getter functions that handles the caching of the simulation""" - try: - image_stack = np.load(cache_file, mmap_mode='r', allow_pickle=False) - except Exception: - image_stack = simulate_diffractions(grain_params, experiment, - controller=controller) - np.save(cache_file, image_stack) - - controller.handle_result('image_stack', image_stack) - - return image_stack - - -def simulate_diffractions(grain_params, experiment, controller): - """actual forward simulation of the diffraction""" - - # use a packed array for the image_stack - array_dims = (experiment.nframes, - experiment.ncols, - ((experiment.nrows - 1)//8) + 1) - image_stack = np.zeros(array_dims, dtype=np.uint8) - - count = len(grain_params) - subprocess = 'simulate diffractions' - - _project = xrdutil._project_on_detector_plane - rD = experiment.rMat_d - chi = experiment.chi - tD = experiment.tVec_d - tS = experiment.tVec_s - distortion = experiment.distortion - - eta_range = [(-np.pi, np.pi), ] - ome_range = experiment.ome_range - ome_period = (-np.pi, np.pi) - - full_hkls = xrdutil._fetch_hkls_from_planedata(experiment.plane_data) - bMat = experiment.plane_data.latVecOps['B'] - wlen = experiment.plane_data.wavelength - - controller.start(subprocess, count) - for i in range(count): - rC = xfcapi.makeRotMatOfExpMap(grain_params[i][0:3]) - tC = np.ascontiguousarray(grain_params[i][3:6]) - vInv_s = np.ascontiguousarray(grain_params[i][6:12]) - ang_list = np.vstack(xfcapi.oscillAnglesOfHKLs(full_hkls[:, 1:], chi, - rC, bMat, wlen, - vInv=vInv_s)) - # hkls not needed here - all_angs, _ = xrdutil._filter_hkls_eta_ome(full_hkls, ang_list, - eta_range, ome_range) - all_angs[:, 2] = xfcapi.mapAngle(all_angs[:, 2], ome_period) - - proj_pts = _project(all_angs, rD, rC, chi, tD, - tC, tS, distortion) - det_xy = proj_pts[0] - _write_pixels(det_xy, all_angs[:, 2], image_stack, experiment.base, - experiment.inv_deltas, experiment.clip_vals) - - controller.update(i + 1) - - controller.finish(subprocess) - return image_stack - - -# ============================================================================== -# %% IMAGE DILATION -# ============================================================================== - - -def get_dilated_image_stack(image_stack, experiment, controller, - cache_file='gold_cubes_dilated.npy'): - - try: - dilated_image_stack = np.load(cache_file, mmap_mode='r', - allow_pickle=False) - except Exception: - dilated_image_stack = dilate_image_stack(image_stack, experiment, - controller) - np.save(cache_file, dilated_image_stack) - - return dilated_image_stack - - -def 
dilate_image_stack(image_stack, experiment, controller): - # first, perform image dilation =========================================== - # perform image dilation (using scikit_image dilation) - subprocess = 'dilate image_stack' - dilation_shape = np.ones((2*experiment.row_dilation + 1, - 2*experiment.col_dilation + 1), - dtype=np.uint8) - image_stack_dilated = np.empty_like(image_stack) - dilated = np.empty( - (image_stack.shape[-2], image_stack.shape[-1] << 3), - dtype=bool - ) - n_images = len(image_stack) - controller.start(subprocess, n_images) - for i_image in range(n_images): - to_dilate = np.unpackbits(image_stack[i_image], axis=-1) - ski_dilation(to_dilate, dilation_shape, - out=dilated) - image_stack_dilated[i_image] = np.packbits(dilated, axis=-1) - controller.update(i_image + 1) - controller.finish(subprocess) - - return image_stack_dilated - - -# This part is critical for the performance of simulate diffractions. It -# basically "renders" the "pixels". It takes the coordinates, quantizes to an -# image coordinate and writes to the appropriate image in the stack. Note -# that it also performs clipping based on inv_deltas and clip_vals. -# -# Note: This could be easily modified so that instead of using an array of -# booleans, an array of uint8 could be used so the image is stored -# with a bit per pixel. - -@numba.njit(nogil=True, cache=True) -def _write_pixels(coords, angles, image, base, inv_deltas, clip_vals): - count = len(coords) - for i in range(count): - x = int(np.floor((coords[i, 0] - base[0]) * inv_deltas[0])) - - if x < 0 or x >= clip_vals[0]: - continue - - y = int(np.floor((coords[i, 1] - base[1]) * inv_deltas[1])) - - if y < 0 or y >= clip_vals[1]: - continue - - z = int(np.floor((angles[i] - base[2]) * inv_deltas[2])) - - x_byte = x // 8 - x_off = 7 - (x % 8) - image[z, y, x_byte] |= (1 << x_off) - -def get_offset_size(n_coords): - offset = 0 - size = n_coords - if USE_MPI: - coords_per_rank = n_coords // world_size - offset = rank * coords_per_rank - - size = coords_per_rank - if rank == world_size - 1: - size = n_coords - offset - - return (offset, size) - -def gather_confidence(controller, confidence, n_grains, n_coords): - if rank == 0: - global_confidence = np.empty(n_grains * n_coords, dtype=np.float64) - else: - global_confidence = None - - # Calculate the send buffer sizes - coords_per_rank = n_coords // world_size - send_counts = np.full(world_size, coords_per_rank * n_grains) - send_counts[-1] = (n_coords - (coords_per_rank * (world_size-1))) * n_grains - - if rank == 0: - # Time how long it takes to perform the MPI gather - controller.start('gather_confidence', 1) - - # Transpose so the data will be more easily re-shaped into its final shape - # Must be flattened as well so the underlying data is modified... - comm.Gatherv(confidence.T.flatten(), (global_confidence, send_counts), root=0) - if rank == 0: - controller.finish('gather_confidence') - confidence = global_confidence.reshape(n_coords, n_grains).T - controller.handle_result("confidence", confidence) - -# ============================================================================== -# %% ORIENTATION TESTING -# ============================================================================== -def test_orientations(image_stack, experiment, controller): - """grand loop precomputing the grown image stack - - image-stack -- is the dilated image stack to be tested against. - - experiment -- A bunch of experiment related parameters. 
- - controller -- An external object implementing the hooks to notify progress - as well as figuring out what to do with results. - """ - - # extract some information needed ========================================= - # number of grains, number of coords (maybe limited by call), projection - # function to use, chunk size to use if multiprocessing and the number - # of cpus. - n_grains = experiment.n_grains - chunk_size = controller.get_chunk_size() - ncpus = controller.get_process_count() - - # generate angles ========================================================= - # all_angles will be a list containing arrays for the different angles to - # use, one entry per grain. - # - # Note that the angle generation is driven by the exp_maps - # in the experiment - all_angles = evaluate_diffraction_angles(experiment, controller) - - # generate coords ========================================================= - # The grid of coords to use to test - test_crds = generate_test_grid(-0.25, 0.25, 101) - n_coords = controller.limit('coords', len(test_crds)) - - # precompute per-grain stuff ============================================== - # gVec_cs and rmat_ss can be precomputed, do so. - subprocess = 'precompute gVec_cs' - controller.start(subprocess, len(all_angles)) - precomp = [] - for i, angs in enumerate(all_angles): - rmat_ss = xfcapi.make_sample_rmat(experiment.chi, angs[:, 2]) - gvec_cs = _anglesToGVec(angs, rmat_ss, experiment.rMat_c[i]) - precomp.append((gvec_cs, rmat_ss)) - controller.finish(subprocess) - - # Divide coords by ranks - (offset, size) = get_offset_size(n_coords) - - # grand loop ============================================================== - # The near field simulation 'grand loop'. Where the bulk of computing is - # performed. We are looking for a confidence matrix that has a n_grains - chunks = range(offset, offset+size, chunk_size) - - subprocess = 'grand_loop' - controller.start(subprocess, n_coords) - finished = 0 - ncpus = min(ncpus, len(chunks)) - - logging.info(f'For {rank=}, {offset=}, {size=}, {chunks=}, {len(chunks)=}, {ncpus=}') - - logging.info('Checking confidence for %d coords, %d grains.', - n_coords, n_grains) - confidence = np.empty((n_grains, size)) - if ncpus > 1: - global _multiprocessing_start_method - logging.info('Running multiprocess %d processes (%s)', - ncpus, _multiprocessing_start_method) - with grand_loop_pool(ncpus=ncpus, - state=(chunk_size, - image_stack, - all_angles, precomp, - test_crds, experiment)) as pool: - for rslice, rvalues in pool.imap_unordered(multiproc_inner_loop, - chunks): - count = rvalues.shape[1] - # We need to adjust this slice for the offset - rslice = slice(rslice.start - offset, rslice.stop - offset) - confidence[:, rslice] = rvalues - finished += count - controller.update(finished) - else: - logging.info('Running in a single process') - for chunk_start in chunks: - chunk_stop = min(n_coords, chunk_start+chunk_size) - rslice, rvalues = _grand_loop_inner( - image_stack, all_angles, - precomp, test_crds, experiment, - start=chunk_start, - stop=chunk_stop - ) - count = rvalues.shape[1] - # We need to adjust this slice for the offset - rslice = slice(rslice.start - offset, rslice.stop - offset) - confidence[:, rslice] = rvalues - finished += count - controller.update(finished) - - controller.finish(subprocess) - - # Now gather result to rank 0 - if USE_MPI: - gather_confidence(controller, confidence, n_grains, n_coords) - else: - controller.handle_result("confidence", confidence) - - -def evaluate_diffraction_angles(experiment, 
controller=None): - """Uses simulateGVecs to generate the angles used per each grain. - returns a list containg one array per grain. - - experiment -- a bag of experiment values, including the grains specs - and other required parameters. - """ - # extract required data from experiment - exp_maps = experiment.exp_maps - plane_data = experiment.plane_data - detector_params = experiment.detector_params - pixel_size = experiment.pixel_size - ome_range = experiment.ome_range - ome_period = experiment.ome_period - - panel_dims_expanded = [(-10, -10), (10, 10)] - subprocess = 'evaluate diffraction angles' - pbar = controller.start(subprocess, len(exp_maps)) - all_angles = [] - ref_gparams = np.array([0., 0., 0., 1., 1., 1., 0., 0., 0.]) - for i, exp_map in enumerate(exp_maps): - gparams = np.hstack([exp_map, ref_gparams]) - sim_results = xrdutil.simulateGVecs(plane_data, - detector_params, - gparams, - panel_dims=panel_dims_expanded, - pixel_pitch=pixel_size, - ome_range=ome_range, - ome_period=ome_period, - distortion=None) - all_angles.append(sim_results[2]) - controller.update(i + 1) - pass - controller.finish(subprocess) - - return all_angles - - -def _grand_loop_inner(image_stack, angles, precomp, - coords, experiment, start=0, stop=None): - """Actual simulation code for a chunk of data. It will be used both, - in single processor and multiprocessor cases. Chunking is performed - on the coords. - - image_stack -- the image stack from the sensors - angles -- the angles (grains) to test - coords -- all the coords to test - precomp -- (gvec_cs, rmat_ss) precomputed for each grain - experiment -- bag with experiment parameters - start -- chunk start offset - stop -- chunk end offset - """ - - t = timeit.default_timer() - n_coords = len(coords) - n_angles = len(angles) - - # experiment geometric layout parameters - rD = experiment.rMat_d - rCn = experiment.rMat_c - tD = experiment.tVec_d - tS = experiment.tVec_s - - # experiment panel related configuration - base = experiment.base - inv_deltas = experiment.inv_deltas - clip_vals = experiment.clip_vals - distortion = experiment.distortion - - _to_detector = xfcapi.gvec_to_xy - # _to_detector = _gvec_to_detector_array - stop = min(stop, n_coords) if stop is not None else n_coords - - # FIXME: distortion hanlding is broken! 
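The MPI decomposition used by `get_offset_size` and the grand loop above is a plain block split: every rank takes `n_coords // world_size` coordinates and the last rank absorbs the remainder; global chunk slices are then shifted by `-offset` before being written into the rank-local confidence array. A standalone restatement of the split (toy sizes in the assert):

```
def get_offset_size(n_coords, rank, world_size):
    coords_per_rank = n_coords // world_size
    offset = rank * coords_per_rank
    size = coords_per_rank
    if rank == world_size - 1:
        size = n_coords - offset  # last rank absorbs the remainder
    return offset, size

assert get_offset_size(10, 3, 4) == (6, 4)
```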
- distortion_fn = None - if distortion is not None and len(distortion > 0): - distortion_fn, distortion_args = distortion - - acc_detector = 0.0 - acc_distortion = 0.0 - acc_quant_clip = 0.0 - confidence = np.zeros((n_angles, stop-start)) - grains = 0 - crds = 0 - - if distortion_fn is None: - for igrn in range(n_angles): - angs = angles[igrn] - rC = rCn[igrn] - gvec_cs, rMat_ss = precomp[igrn] - grains += 1 - for icrd in range(start, stop): - t0 = timeit.default_timer() - det_xy = _to_detector( - gvec_cs, rD, rMat_ss, rC, tD, tS, coords[icrd] - ) - t1 = timeit.default_timer() - c = _quant_and_clip_confidence(det_xy, angs[:, 2], image_stack, - base, inv_deltas, clip_vals) - t2 = timeit.default_timer() - acc_detector += t1 - t0 - acc_quant_clip += t2 - t1 - crds += 1 - confidence[igrn, icrd - start] = c - else: - for igrn in range(n_angles): - angs = angles[igrn] - rC = rCn[igrn] - gvec_cs, rMat_ss = precomp[igrn] - grains += 1 - for icrd in range(start, stop): - t0 = timeit.default_timer() - det_xy = _to_detector( - gvec_cs, rD, rMat_ss, rC, tD, tS, coords[icrd] - ) - t1 = timeit.default_timer() - det_xy = distortion_fn(tmp_xys, distortion_args, invert=True) - t2 = timeit.default_timer() - c = _quant_and_clip_confidence(det_xy, angs[:, 2], image_stack, - base, inv_deltas, clip_vals) - t3 = timeit.default_timer() - acc_detector += t1 - t0 - acc_distortion += t2 - t1 - acc_quant_clip += t3 - t2 - crds += 1 - confidence[igrn, icrd - start] = c - - t = timeit.default_timer() - t - return slice(start, stop), confidence - - -def generate_test_grid(low, top, samples): - """generates a test grid of coordinates""" - cvec_s = np.linspace(low, top, samples) - Xs, Ys, Zs = np.meshgrid(cvec_s, cvec_s, cvec_s) - return np.vstack([Xs.flatten(), Ys.flatten(), Zs.flatten()]).T - - -# Multiprocessing bits ======================================================== -# -# The parallellized part of test_orientations uses some big arrays as part of -# the state that needs to be communicated to the spawn processes. -# -# On fork platforms, take advantage of process memory inheritance. -# -# On non fork platforms, rely on joblib dumping the state to disk and loading -# back in the target processes, pickling only the minimal information to load -# state back. Pickling the big arrays directly was causing memory errors and -# would be less efficient in memory (as joblib memmaps by default the big -# arrays, meaning they may be shared between processes). - -def multiproc_inner_loop(chunk): - """function to use in multiprocessing that computes the simulation over the - task's alloted chunk of data""" - - chunk_size = _mp_state[0] - n_coords = len(_mp_state[4]) - - (offset, size) = get_offset_size(n_coords) - - chunk_stop = min(offset+size, chunk+chunk_size) - return _grand_loop_inner(*_mp_state[1:], start=chunk, stop=chunk_stop) - - -def worker_init(id_state, id_exp): - """process initialization function. This function is only used when the - child processes are spawned (instead of forked). When using the fork model - of multiprocessing the data is just inherited in process memory.""" - import joblib - - global _mp_state - state = joblib.load(id_state) - experiment = joblib.load(id_exp) - _mp_state = state + (experiment,) - - -@contextlib.contextmanager -def grand_loop_pool(ncpus, state): - """function that handles the initialization of multiprocessing. It handles - properly the use of spawned vs forked multiprocessing. 
The multiprocessing - can be either 'fork' or 'spawn', with 'spawn' being required in non-fork - platforms (like Windows) and 'fork' being preferred on fork platforms due - to its efficiency. - """ - # state = ( chunk_size, - # image_stack, - # angles, - # precomp, - # coords, - # experiment ) - global _multiprocessing_start_method - - multiprocessing.set_start_method(_multiprocessing_start_method) - - if _multiprocessing_start_method == 'fork': - # Use FORK multiprocessing. - - # All read-only data can be inherited in the process. So we "pass" it - # as a global that the child process will be able to see. At the end of - # theprocessing the global is removed. - global _mp_state - _mp_state = state - pool = multiprocessing.Pool(ncpus) - yield pool - del (_mp_state) - else: - # Use SPAWN multiprocessing. - - # As we can not inherit process data, all the required data is - # serialized into a temporary directory using joblib. The - # multiprocessing pool will have the "worker_init" as initialization - # function that takes the key for the serialized data, which will be - # used to load the parameter memory into the spawn process (also using - # joblib). In theory, joblib uses memmap for arrays if they are not - # compressed, so no compression is used for the bigger arrays. - import joblib - tmp_dir = tempfile.mkdtemp(suffix='-nf-grand-loop') - try: - # dumb dumping doesn't seem to work very well.. do something ad-hoc - logging.info('Using "%s" as temporary directory.', tmp_dir) - - id_exp = joblib.dump(state[-1], - os.path.join(tmp_dir, - 'grand-loop-experiment.gz'), - compress=True) - id_state = joblib.dump(state[:-1], - os.path.join(tmp_dir, 'grand-loop-data')) - pool = multiprocessing.Pool(ncpus, worker_init, - (id_state[0], id_exp[0])) - yield pool - finally: - logging.info('Deleting "%s".', tmp_dir) - shutil.rmtree(tmp_dir) - - -# ============================================================================== -# %% SCRIPT ENTRY AND PARAMETER HANDLING -# ============================================================================== -def main(args, controller): - grain_params, experiment = mockup_experiment() - controller.handle_result('experiment', experiment) - controller.handle_result('grain_params', grain_params) - image_stack = get_simulate_diffractions(grain_params, experiment, - controller=controller) - image_stack = get_dilated_image_stack(image_stack, experiment, - controller) - - test_orientations(image_stack, experiment, - controller=controller) - - -def parse_args(): - try: - default_ncpus = multiprocessing.cpu_count() - except NotImplementedError: - default_ncpus = 1 - - parser = argparse.ArgumentParser() - parser.add_argument("--inst-profile", action='append', default=[], - help="instrumented profile") - parser.add_argument("--generate", - help="generate file with intermediate results") - parser.add_argument("--check", - help="check against an file with intermediate results") - parser.add_argument("--limit", type=int, - help="limit the size of the run") - parser.add_argument("--ncpus", type=int, default=default_ncpus, - help="number of processes to use") - parser.add_argument("--chunk-size", type=int, default=100, - help="chunk size for use in multiprocessing/reporting") - parser.add_argument("--force-spawn-multiprocessing", action='store_true', - help="force using spawn as the multiprocessing method") - args = parser.parse_args() - - ''' - keys = [ - 'inst_profile', - 'generate', - 'check', - 'limit', - 'ncpus', - 'chunk_size'] - print( - '\n'.join([': '.join([key, 
str(getattr(args, key))]) for key in keys]) - ) - ''' - return args - - -def build_controller(args): - # builds the controller to use based on the args - - # result handle - try: - import progressbar - progress_handler = progressbar_progress_observer() - except ImportError: - progress_handler = null_progress_observer() - - if args.check is not None: - if args.generate is not None: - logging.warn( - "generating and checking can not happen at the same time, " - + "going with checking") - - result_handler = checking_result_handler(args.check) - elif args.generate is not None: - result_handler = saving_result_handler(args.generate) - else: - result_handler = forgetful_result_handler() - - # if args.ncpus > 1 and os.name == 'nt': - # logging.warn("Multiprocessing on Windows is disabled for now") - # args.ncpus = 1 - - controller = ProcessController(result_handler, progress_handler, - ncpus=args.ncpus, - chunk_size=args.chunk_size) - if args.limit is not None: - controller.set_limit('coords', lambda x: min(x, args.limit)) - - return controller - - -# assume that if os has fork, it will be used by multiprocessing. -# note that on python > 3.4 we could use multiprocessing get_start_method and -# set_start_method for a cleaner implementation of this functionality. -_multiprocessing_start_method = 'fork' if hasattr(os, 'fork') else 'spawn' - -if __name__ == '__main__': - LOG_LEVEL = logging.INFO - FORMAT="%(relativeCreated)12d [%(process)6d/%(thread)6d] %(levelname)8s: %(message)s" - - logging.basicConfig(level=LOG_LEVEL, format=FORMAT) - - # Setting the root log level via logging.basicConfig() doesn't always work. - # The next line ensures that it will get set. - logging.getLogger().setLevel(LOG_LEVEL) - - args = parse_args() - - if len(args.inst_profile) > 0: - from hexrd.utils import profiler - - logging.debug("Instrumenting functions") - profiler.instrument_all(args.inst_profile) - - if args.force_spawn_multiprocessing: - _multiprocessing_start_method = 'spawn' - - controller = build_controller(args) - main(args, controller) - del controller - - if len(args.inst_profile) > 0: - logging.debug("Dumping profiler results") - profiler.dump_results(args.inst_profile) diff --git a/hexrd/xrdutil/phutil.py b/hexrd/xrdutil/phutil.py index d09ae4ef4..be702afe5 100644 --- a/hexrd/xrdutil/phutil.py +++ b/hexrd/xrdutil/phutil.py @@ -10,12 +10,12 @@ from concurrent.futures import ThreadPoolExecutor import numpy as np +from numba import njit from hexrd import constants as ct from hexrd.instrument import Detector from hexrd.transforms import xfcapi from hexrd.utils.concurrent import distribute_tasks -from hexrd.utils.decorators import numba_njit_if_available class SampleLayerDistortion: @@ -201,6 +201,17 @@ def tth_corr_sample_layer(panel, xy_pts, return np.vstack([-tth_corr, ref_angs[:, 1]]).T +def invalidate_past_critical_beta(panel: Detector, xy_pts: np.ndarray, + pinhole_thickness: float, + pinhole_radius: float) -> None: + """Set any xy_pts past critical beta to be nan""" + # Compute the critical beta angle. Anything past this is invalid. 
+ critical_beta = np.arctan(2 * pinhole_radius / pinhole_thickness) + dhats = xfcapi.unitRowVector(panel.cart_to_dvecs(xy_pts)) + cos_beta = -dhats[:, 2] + xy_pts[np.arccos(cos_beta) > critical_beta] = np.nan + + def tth_corr_map_sample_layer(instrument, layer_standoff, layer_thickness, pinhole_thickness, pinhole_radius): @@ -731,7 +742,7 @@ def _compute_vi_qq_i(phi_d, sin_b, bd, sin_phii, cos_phii, alpha_i, phi_xi, # The numba version (works better in conjunction with multi-threading) -_compute_vi_qq_i_numba = numba_njit_if_available( +_compute_vi_qq_i_numba = njit( nogil=True, cache=True)(_compute_vi_qq_i) diff --git a/hexrd/xrdutil/utils.py b/hexrd/xrdutil/utils.py index 6b1e6c3bc..2494e7256 100644 --- a/hexrd/xrdutil/utils.py +++ b/hexrd/xrdutil/utils.py @@ -26,7 +26,14 @@ # Boston, MA 02111-1307 USA or visit . # ============================================================ + +from typing import Optional, Union, Any, Generator +from hexrd.material.crystallography import PlaneData +from hexrd.distortion.distortionabc import DistortionABC + +import numba import numpy as np +import numba from hexrd import constants from hexrd import matrixutil as mutil @@ -36,15 +43,14 @@ from hexrd.transforms import xf from hexrd.transforms import xfcapi - from hexrd.valunits import valWUnit from hexrd import distortion as distortion_pkg -from hexrd.constants import USE_NUMBA -if USE_NUMBA: - import numba +from hexrd.deprecation import deprecated + +simlp = 'hexrd.instrument.hedm_instrument.HEDMInstrument.simulate_laue_pattern' # ============================================================================= # PARAMETERS @@ -55,14 +61,14 @@ d2r = piby180 = constants.d2r r2d = constants.r2d -epsf = constants.epsf # ~2.2e-16 -ten_epsf = 10 * epsf # ~2.2e-15 +epsf = constants.epsf # ~2.2e-16 +ten_epsf = 10 * epsf # ~2.2e-15 sqrt_epsf = constants.sqrt_epsf # ~1.5e-8 bHat_l_DFLT = constants.beam_vec.flatten() eHat_l_DFLT = constants.eta_vec.flatten() -nans_1x2 = np.nan*np.ones((1, 2)) +nans_1x2 = np.nan * np.ones((1, 2)) # ============================================================================= # CLASSES @@ -76,9 +82,8 @@ class EtaOmeMaps(object): reference to an open file object, which is not pickleable. 
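Review note: the geometric cutoff encoded by the new `invalidate_past_critical_beta` helper above is simple enough to check in isolation — a ray only clears a pinhole of radius r and thickness t if it is inclined less than arctan(2r / t) from the pinhole axis. Below is a minimal, hexrd-free sketch; `mask_past_critical_beta` and the precomputed unit vectors `dhats` are illustrative stand-ins for the `panel.cart_to_dvecs` / `unitRowVector` pipeline, keeping the same sign convention (`cos_beta = -dhats[:, 2]`). (As an aside, the import hunk for `hexrd/xrdutil/utils.py` in this span adds `import numba` on both sides of `import numpy as np`; one of the two is redundant.)

```python
import numpy as np

def mask_past_critical_beta(dhats, pinhole_radius, pinhole_thickness):
    # a ray only clears a pinhole of radius r and thickness t if it is
    # inclined less than arctan(2r / t) from the pinhole axis
    critical_beta = np.arctan(2 * pinhole_radius / pinhole_thickness)
    cos_beta = -dhats[:, 2]
    return np.arccos(cos_beta) > critical_beta  # True -> invalidate

# toy check: r = 0.2, t = 0.1 gives a critical beta of ~76 degrees
beta = np.radians([0.0, 80.0])
dhats = np.column_stack([np.sin(beta), np.zeros(2), -np.cos(beta)])
print(mask_past_critical_beta(dhats, 0.2, 0.1))  # [False  True]
```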
""" - def __init__(self, ome_eta_archive): - - ome_eta = np.load(ome_eta_archive, allow_pickle=True) + def __init__(self, ome_eta_archive: str): + ome_eta: np.ndarray = np.load(ome_eta_archive, allow_pickle=True) planeData_args = ome_eta['planeData_args'] planeData_hkls = ome_eta['planeData_hkls'] @@ -91,11 +96,7 @@ def __init__(self, ome_eta_archive): self.etas = ome_eta['etas'] self.omegas = ome_eta['omegas'] - def save(self, filename): - self.save_eta_ome_maps(self, filename) - - @staticmethod - def save_eta_ome_maps(eta_ome, filename): + def save_eta_ome_maps(self, filename: str) -> None: """ eta_ome.dataStore eta_ome.planeData @@ -105,20 +106,21 @@ def save_eta_ome_maps(eta_ome, filename): eta_ome.etas eta_ome.omegas """ - args = np.array(eta_ome.planeData.getParams(), dtype=object)[:4] + args = np.array(self.planeData.getParams(), dtype=object)[:4] args[2] = valWUnit('wavelength', 'length', args[2], 'angstrom') - hkls = np.vstack([i['hkl'] for i in eta_ome.planeData.hklDataList]).T - save_dict = {'dataStore': eta_ome.dataStore, - 'etas': eta_ome.etas, - 'etaEdges': eta_ome.etaEdges, - 'iHKLList': eta_ome.iHKLList, - 'omegas': eta_ome.omegas, - 'omeEdges': eta_ome.omeEdges, - 'planeData_args': args, - 'planeData_hkls': hkls, - 'planeData_excl': eta_ome.planeData.exclusions} + hkls = np.vstack([i['hkl'] for i in self.planeData.hklDataList]).T + save_dict = { + 'dataStore': self.dataStore, + 'etas': self.etas, + 'etaEdges': self.etaEdges, + 'iHKLList': self.iHKLList, + 'omegas': self.omegas, + 'omeEdges': self.omeEdges, + 'planeData_args': args, + 'planeData_hkls': hkls, + 'planeData_excl': self.planeData.exclusions, + } np.savez_compressed(filename, **save_dict) - pass # end of class: EtaOmeMaps # ============================================================================= @@ -126,86 +128,19 @@ def save_eta_ome_maps(eta_ome, filename): # ============================================================================= -def _zproject(x, y): +def _zproject(x: np.ndarray, y: np.ndarray): return np.cos(x) * np.sin(y) - np.sin(x) * np.cos(y) -def _convert_angles(tth_eta, detector, - rmat_s, tvec_s, tvec_c, - beam_vector=constants.beam_vec, - eta_vector=constants.eta_vec): - """ - Coverts frame-local angles to effective angles in the LAB reference frame. - - Operates on a detector instance in lieu of instrument. - - Parameters - ---------- - tth_eta : TYPE - DESCRIPTION. - detector : TYPE - DESCRIPTION. - rmat_s : TYPE - DESCRIPTION. - tvec_c : TYPE - DESCRIPTION. - beam_vector : TYPE, optional - DESCRIPTION. The default is constants.beam_vec. - eta_vector : TYPE, optional - DESCRIPTION. The default is constants.eta_vec. - - Returns - ------- - tth_eta_ref : TYPE - DESCRIPTION. - - Notes - ----- - FIXME: This API won't work for rotation series data - """ - - tth_eta = np.atleast_2d(tth_eta) - - chi = np.arctan2(rmat_s[2, 1], rmat_s[1, 1]) - ome = np.arctan2(rmat_s[0, 2], rmat_s[0, 0]) - - # !!! reform rmat_s to be consistent with def in geometric model - rmat_s = xfcapi.make_sample_rmat(chi, ome) - rmat_c = constants.identity_3x3 - # tvec_s = constants.zeros_3 - tvec_c_ref = constants.zeros_3 - - # FIXME: doesn't work for rotation series with different ome yet. 
- full_angs = np.hstack([tth_eta, ome*np.ones((len(tth_eta), 1))]) - - # convert to gvectors using trivial crystal frame - gvec_s = xfcapi.angles_to_gvec( - full_angs, beam_vec=beam_vector, eta_vec=eta_vector, chi=chi - ) - - # convert to detector points - det_xys = xfcapi.gvec_to_xy( - gvec_s, - detector.rmat, rmat_s, rmat_c, - detector.tvec, tvec_s, tvec_c, - beam_vec=beam_vector - ) - - # convert to angles in LAB ref - tth_eta_ref, _ = xfcapi.detectorXYToGvec( - det_xys, detector.rmat, rmat_s, detector.tvec, tvec_s, tvec_c_ref, - beamVec=beam_vector, etaVec=eta_vector - ) - - return np.vstack(tth_eta_ref).T - - -def zproject_sph_angles(invecs, chi=0., - method='stereographic', - source='d', - use_mask=False, - invert_z=False, - rmat=None): +def zproject_sph_angles( + invecs: np.ndarray, + chi: float = 0.0, + method: str = 'stereographic', + source: str = 'd', + use_mask: bool = False, + invert_z: bool = False, + rmat: Optional[np.ndarray] = None, +) -> Union[np.ndarray, tuple[np.ndarray, np.ndarray]]: """ Projects spherical angles to 2-d mapping. @@ -273,26 +208,24 @@ def zproject_sph_angles(invecs, chi=0., if use_mask: pzi = spts_s[:, 2] <= 0 spts_s = spts_s[pzi, :] - npts_s = len(spts_s) if method.lower() == 'stereographic': - ppts = np.vstack([ - spts_s[:, 0]/(1. - spts_s[:, 2]), - spts_s[:, 1]/(1. - spts_s[:, 2]) - ]).T + ppts = np.vstack( + [ + spts_s[:, 0] / (1.0 - spts_s[:, 2]), + spts_s[:, 1] / (1.0 - spts_s[:, 2]), + ] + ).T elif method.lower() == 'equal-area': - chords = spts_s + np.tile([0, 0, 1], (npts_s, 1)) + chords = spts_s + np.tile([0, 0, 1], (len(spts_s), 1)) scl = np.tile(xfcapi.rowNorm(chords), (2, 1)).T ucrd = mutil.unitVector( - np.hstack([ - chords[:, :2], - np.zeros((len(spts_s), 1)) - ]).T + np.hstack([chords[:, :2], np.zeros((len(spts_s), 1))]).T ) ppts = ucrd[:2, :].T * scl else: - raise RuntimeError("method '%s' not recognized" % method) + raise RuntimeError(f"method '{method}' not recognized") if use_mask: return ppts, pzi @@ -300,49 +233,41 @@ def zproject_sph_angles(invecs, chi=0., return ppts -def make_polar_net(ndiv=24, projection='stereographic', max_angle=120.): +def make_polar_net( + ndiv: int = 24, projection: str = 'stereographic', max_angle: float = 120.0 +) -> np.ndarray: """ TODO: options for generating net boundaries; fixed to Z proj. """ - ndiv_tth = int(np.floor(0.5*ndiv)) + 1 + ndiv_tth = int(np.floor(0.5 * ndiv)) + 1 wtths = np.radians( - np.linspace(0, 1, num=ndiv_tth, endpoint=True)*max_angle - ) - wetas = np.radians( - np.linspace(-1, 1, num=ndiv+1, endpoint=True)*180. - ) - weta_gen = np.radians( - np.linspace(-1, 1, num=181, endpoint=True)*180. 
+ np.linspace(0, 1, num=ndiv_tth, endpoint=True) * max_angle ) + wetas = np.radians(np.linspace(-1, 1, num=ndiv + 1, endpoint=True) * 180.0) + weta_gen = np.radians(np.linspace(-1, 1, num=181, endpoint=True) * 180.0) pts = [] for eta in wetas: - net_angs = np.vstack([[wtths[0], wtths[-1]], - np.tile(eta, 2), - np.zeros(2)]).T - projp = zproject_sph_angles(net_angs, method=projection, source='d') - pts.append(projp) - pts.append(np.nan*np.ones((1, 2))) + net_ang = np.vstack( + [[wtths[0], wtths[-1]], np.tile(eta, 2), np.zeros(2)] + ).T + pts.append(zproject_sph_angles(net_ang, method=projection, source='d')) + pts.append(np.nan * np.ones((1, 2))) for tth in wtths[1:]: - net_angs = np.vstack([tth*np.ones_like(weta_gen), - weta_gen, - np.zeros_like(weta_gen)]).T - projp = zproject_sph_angles(net_angs, method=projection, source='d') - pts.append(projp) + net_ang = np.vstack( + [tth * np.ones_like(weta_gen), weta_gen, np.zeros_like(weta_gen)] + ).T + pts.append(zproject_sph_angles(net_ang, method=projection, source='d')) pts.append(nans_1x2) - ''' - # old method - for tth in wtths: - net_angs = np.vstack([tth*np.ones_like(wetas), - wetas, - piby2*np.ones_like(wetas)]).T - projp = zproject_sph_angles(net_angs, method=projection) - pts.append(projp) - ''' - pts = np.vstack(pts) - return pts - - -def validateAngleRanges(angList, startAngs, stopAngs, ccw=True): + + return np.vstack(pts) + + +def validateAngleRanges( + angList: Union[np.ndarray, list[float]], + startAngs: Union[np.ndarray, list[float]], + stopAngs: Union[np.ndarray, list[float]], + ccw: bool = True, +) -> np.ndarray: """ Indetify angles that fall within specified ranges. @@ -352,13 +277,12 @@ def validateAngleRanges(angList, startAngs, stopAngs, ccw=True): There is, of course an ambigutiy if the start and stop angle are the same; we treat them as implying 2*pi """ - angList = np.atleast_1d(angList).flatten() # needs to have len - startAngs = np.atleast_1d(startAngs).flatten() # needs to have len - stopAngs = np.atleast_1d(stopAngs).flatten() # needs to have len + angList = np.atleast_1d(angList).flatten() + startAngs = np.atleast_1d(startAngs).flatten() + stopAngs = np.atleast_1d(stopAngs).flatten() - n_ranges = len(startAngs) - assert len(stopAngs) == n_ranges, \ - "length of min and max angular limits must match!" + if len(startAngs) != len(stopAngs): + raise ValueError("start and stop angles must have same length") # to avoid warnings in >=, <= later down, mark nans; # need these to trick output to False in the case of nan input @@ -367,7 +291,7 @@ def validateAngleRanges(angList, startAngs, stopAngs, ccw=True): reflInRange = np.zeros(angList.shape, dtype=bool) # bin length for chunking - binLen = np.pi / 2. + binLen = np.pi / 2.0 # in plane vectors defining wedges x0 = np.vstack([np.cos(startAngs), np.sin(startAngs)]) @@ -375,44 +299,43 @@ def validateAngleRanges(angList, startAngs, stopAngs, ccw=True): # dot products dp = np.sum(x0 * x1, axis=0) - if np.any(dp >= 1. - sqrt_epsf) and n_ranges > 1: + if np.any(dp >= 1.0 - sqrt_epsf) and len(startAngs) > 1: # ambiguous case raise RuntimeError( - "Improper usage; " + - "at least one of your ranges is alread 360 degrees!") - elif dp[0] >= 1. - sqrt_epsf and n_ranges == 1: + "Improper usage; " + + "at least one of your ranges is already 360 degrees!" + ) + elif dp[0] >= 1.0 - sqrt_epsf and len(startAngs) == 1: # trivial case! 
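Review note: the wedge test at the heart of `validateAngleRanges` becomes transparent once you notice that `_zproject(x, y)` is just sin(y - x) by the sine difference identity. An angle theta lies inside a CCW wedge [start, stop] narrower than pi iff sin(start - theta) <= 0 <= sin(stop - theta) — which is exactly the zStart/zStop test applied per subrange further down, and the reason the code first chops each range into subranges of at most binLen = pi/2. A compact restatement:

```python
import numpy as np

def _zproject(x, y):
    return np.cos(x) * np.sin(y) - np.sin(x) * np.cos(y)  # == sin(y - x)

def in_ccw_wedge(theta, start, stop):
    # inside iff theta is CCW of start and CW of stop (wedge < pi)
    return (_zproject(theta, start) <= 0) & (_zproject(theta, stop) >= 0)

theta = np.radians([10.0, 50.0, 100.0])
print(in_ccw_wedge(theta, 0.0, np.radians(60.0)))  # [ True  True False]
```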
reflInRange = np.ones(angList.shape, dtype=bool) reflInRange[nan_mask] = False else: # solve for arc lengths # ...note: no zeros should have made it here - a = x0[0, :]*x1[1, :] - x0[1, :]*x1[0, :] - b = x0[0, :]*x1[0, :] + x0[1, :]*x1[1, :] + a = x0[0, :] * x1[1, :] - x0[1, :] * x1[0, :] + b = x0[0, :] * x1[0, :] + x0[1, :] * x1[1, :] phi = np.arctan2(b, a) - arclen = 0.5*np.pi - phi # these are clockwise + arclen = 0.5 * np.pi - phi # these are clockwise cw_phis = arclen < 0 - arclen[cw_phis] = 2*np.pi + arclen[cw_phis] # all positive (CW) now + arclen[cw_phis] += 2 * np.pi # all positive (CW) now if not ccw: - arclen = 2*np.pi - arclen + arclen = 2 * np.pi - arclen - if sum(arclen) > 2*np.pi: + if sum(arclen) > 2 * np.pi: raise RuntimeWarning( - "Specified angle ranges sum to > 360 degrees, " + - "which is suspect...") + "Specified angle ranges sum to > 360 degrees, " + + "which is suspect..." + ) - # check that there are no more thandp = np.zeros(n_ranges) - for i in range(n_ranges): + # check that there are no more than dp = np.zeros(len(startAngs)) + for i in range(len(startAngs)): # number or subranges using 'binLen' - numSubranges = int(np.ceil(arclen[i]/binLen)) + numSubranges = int(np.ceil(arclen[i] / binLen)) - # check remaider + # check remainder binrem = np.remainder(arclen[i], binLen) - if binrem == 0: - finalBinLen = binLen - else: - finalBinLen = binrem + finalBinLen = binLen if binrem == 0 else binrem # if clockwise, negate bin length if not ccw: @@ -422,32 +345,43 @@ def validateAngleRanges(angList, startAngs, stopAngs, ccw=True): # Create sub ranges on the fly to avoid ambiguity in dot product # for wedges >= 180 degrees subRanges = np.array( - [startAngs[i] + binLen*j for j in range(numSubranges)] - + [startAngs[i] + binLen*(numSubranges - 1) + finalBinLen] - ) + [startAngs[i] + binLen * j for j in range(numSubranges)] + + [startAngs[i] + binLen * (numSubranges - 1) + finalBinLen] + ) for k in range(numSubranges): zStart = _zproject(angList, subRanges[k]) zStop = _zproject(angList, subRanges[k + 1]) if ccw: - zStart[nan_mask] = 999. - zStop[nan_mask] = -999. - reflInRange = \ - reflInRange | np.logical_and(zStart <= 0, zStop >= 0) + zStart[nan_mask] = 999.0 + zStop[nan_mask] = -999.0 + reflInRange = reflInRange | np.logical_and( + zStart <= 0, zStop >= 0 + ) else: - zStart[nan_mask] = -999. - zStop[nan_mask] = 999. - reflInRange = \ - reflInRange | np.logical_and(zStart >= 0, zStop <= 0) + zStart[nan_mask] = -999.0 + zStop[nan_mask] = 999.0 + reflInRange = reflInRange | np.logical_and( + zStart >= 0, zStop <= 0 + ) return reflInRange -def simulateOmeEtaMaps(omeEdges, etaEdges, planeData, expMaps, - chi=0., - etaTol=None, omeTol=None, - etaRanges=None, omeRanges=None, - bVec=constants.beam_vec, eVec=constants.eta_vec, - vInv=constants.identity_6x1): +@deprecated(removal_date='2025-01-01') +def simulateOmeEtaMaps( + omeEdges, + etaEdges, + planeData, + expMaps, + chi=0.0, + etaTol=None, + omeTol=None, + etaRanges=None, + omeRanges=None, + bVec=constants.beam_vec, + eVec=constants.eta_vec, + vInv=constants.identity_6x1, +): """ Simulate spherical maps. 
@@ -506,10 +440,14 @@ def simulateOmeEtaMaps(omeEdges, etaEdges, planeData, expMaps, omeMin = omeEdges[0] omeMax = omeEdges[-1] if omeRanges is None: - omeRanges = [[omeMin, omeMax], ] + omeRanges = [ + [omeMin, omeMax], + ] if etaRanges is None: - etaRanges = [[etaMin, etaMax], ] + etaRanges = [ + [etaMin, etaMax], + ] # signed deltas IN RADIANS del_ome = omeEdges[1] - omeEdges[0] @@ -531,8 +469,9 @@ def simulateOmeEtaMaps(omeEdges, etaEdges, planeData, expMaps, dpix_ome = round(omeTol / abs(del_ome)) dpix_eta = round(etaTol / abs(del_eta)) - i_dil, j_dil = np.meshgrid(np.arange(-dpix_ome, dpix_ome + 1), - np.arange(-dpix_eta, dpix_eta + 1)) + i_dil, j_dil = np.meshgrid( + np.arange(-dpix_ome, dpix_ome + 1), np.arange(-dpix_eta, dpix_eta + 1) + ) # get symmetrically expanded hkls from planeData sym_hkls = planeData.getSymHKLs() @@ -555,24 +494,34 @@ def simulateOmeEtaMaps(omeEdges, etaEdges, planeData, expMaps, for iOr in range(nOrs): rMat_c = xfcapi.makeRotMatOfExpMap(expMaps[iOr, :]) angList = np.vstack( - xfcapi.oscillAnglesOfHKLs(these_hkls, chi, rMat_c, bMat, wlen, - beamVec=bVec, etaVec=eVec, vInv=vInv) + xfcapi.oscillAnglesOfHKLs( + these_hkls, + chi, + rMat_c, + bMat, + wlen, + beamVec=bVec, + etaVec=eVec, + vInv=vInv, ) + ) if not np.all(np.isnan(angList)): # angList[:, 1] = xfcapi.mapAngle( - angList[:, 1], - [etaEdges[0], etaEdges[0]+2*np.pi]) + angList[:, 1], [etaEdges[0], etaEdges[0] + 2 * np.pi] + ) angList[:, 2] = xfcapi.mapAngle( - angList[:, 2], - [omeEdges[0], omeEdges[0]+2*np.pi]) + angList[:, 2], [omeEdges[0], omeEdges[0] + 2 * np.pi] + ) # # do eta ranges angMask_eta = np.zeros(len(angList), dtype=bool) for etas in etaRanges: angMask_eta = np.logical_or( angMask_eta, - xf.validateAngleRanges(angList[:, 1], etas[0], etas[1]) + xf.validateAngleRanges( + angList[:, 1], etas[0], etas[1] + ), ) # do omega ranges @@ -584,7 +533,8 @@ def simulateOmeEtaMaps(omeEdges, etaEdges, planeData, expMaps, angMask_ome = np.logical_or( angMask_ome, xf.validateAngleRanges( - angList[:, 2], omes[0], omes[1], ccw=ccw) + angList[:, 2], omes[0], omes[1], ccw=ccw + ), ) # mask angles list, hkls @@ -615,49 +565,54 @@ def simulateOmeEtaMaps(omeEdges, etaEdges, planeData, expMaps, if culledEtaIdx is not None and culledOmeIdx is not None: if dpix_ome > 0 or dpix_eta > 0: - i_sup = omeIndices[culledOmeIdx] + \ - np.array([i_dil.flatten()], dtype=int) - j_sup = etaIndices[culledEtaIdx] + \ - np.array([j_dil.flatten()], dtype=int) + i_sup = omeIndices[culledOmeIdx] + np.array( + [i_dil.flatten()], dtype=int + ) + j_sup = etaIndices[culledEtaIdx] + np.array( + [j_dil.flatten()], dtype=int + ) # catch shit that falls off detector... # maybe make this fancy enough to wrap at 2pi? idx_mask = np.logical_and( np.logical_and(i_sup >= 0, i_sup < i_max), - np.logical_and(j_sup >= 0, j_sup < j_max)) - eta_ome[iHKL, - i_sup[idx_mask], - j_sup[idx_mask]] = 1. + np.logical_and(j_sup >= 0, j_sup < j_max), + ) + eta_ome[iHKL, i_sup[idx_mask], j_sup[idx_mask]] = ( + 1.0 + ) else: - eta_ome[iHKL, - omeIndices[culledOmeIdx], - etaIndices[culledEtaIdx]] = 1. 
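Review note: the tolerance dilation in `simulateOmeEtaMaps` above smears each predicted (ome, eta) hit by +/- dpix map pixels via the `i_dil` / `j_dil` meshgrid, dropping anything that falls off the map edge. A toy standalone version of that step (`dilate_hits` is a hypothetical name for the inlined logic):

```python
import numpy as np

def dilate_hits(nrows, ncols, hits, dpix_ome, dpix_eta):
    # smear each (row, col) hit by +/- dpix pixels, clipped at the map edge
    i_dil, j_dil = np.meshgrid(np.arange(-dpix_ome, dpix_ome + 1),
                               np.arange(-dpix_eta, dpix_eta + 1))
    eta_ome = np.zeros((nrows, ncols))
    for i, j in hits:
        i_sup = i + i_dil.flatten()
        j_sup = j + j_dil.flatten()
        keep = (i_sup >= 0) & (i_sup < nrows) & (j_sup >= 0) & (j_sup < ncols)
        eta_ome[i_sup[keep], j_sup[keep]] = 1.0
    return eta_ome

print(dilate_hits(5, 5, [(0, 2)], 1, 1))  # a 2x3 block hugging the top edge
```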
- pass # close conditional on pixel dilation - pass # close conditional on ranges - pass # close for loop on valid reflections - pass # close conditional for valid angles + eta_ome[ + iHKL, + omeIndices[culledOmeIdx], + etaIndices[culledEtaIdx], + ] = 1.0 return eta_ome -def _fetch_hkls_from_planedata(pd): +def _fetch_hkls_from_planedata(pd: PlaneData): return np.hstack(pd.getSymHKLs(withID=True)).T -def _filter_hkls_eta_ome(hkls, angles, eta_range, ome_range, - return_mask=False): +def _filter_hkls_eta_ome( + hkls: np.ndarray, + angles: np.ndarray, + eta_range: list[tuple[float]], + ome_range: list[tuple[float]], + return_mask: bool = False, +) -> Union[ + tuple[np.ndarray, np.ndarray], tuple[np.ndarray, np.ndarray, np.ndarray] +]: """ given a set of hkls and angles, filter them by the eta and omega ranges """ - # do eta ranges angMask_eta = np.zeros(len(angles), dtype=bool) for etas in eta_range: angMask_eta = np.logical_or( - angMask_eta, - xf.validateAngleRanges(angles[:, 1], etas[0], etas[1]) + angMask_eta, xf.validateAngleRanges(angles[:, 1], etas[0], etas[1]) ) - # do omega ranges ccw = True angMask_ome = np.zeros(len(angles), dtype=bool) for omes in ome_range: @@ -665,10 +620,9 @@ def _filter_hkls_eta_ome(hkls, angles, eta_range, ome_range, ccw = False angMask_ome = np.logical_or( angMask_ome, - xf.validateAngleRanges(angles[:, 2], omes[0], omes[1], ccw=ccw) + xf.validateAngleRanges(angles[:, 2], omes[0], omes[1], ccw=ccw), ) - # mask angles list, hkls angMask = np.logical_and(angMask_eta, angMask_ome) allAngs = angles[angMask, :] @@ -680,26 +634,37 @@ def _filter_hkls_eta_ome(hkls, angles, eta_range, ome_range, return allAngs, allHKLs -def _project_on_detector_plane(allAngs, - rMat_d, rMat_c, chi, - tVec_d, tVec_c, tVec_s, - distortion, - beamVec=constants.beam_vec): +def _project_on_detector_plane( + allAngs: np.ndarray, + rMat_d: np.ndarray, + rMat_c: np.ndarray, + chi: float, + tVec_d: np.ndarray, + tVec_c: np.ndarray, + tVec_s: np.ndarray, + distortion: DistortionABC, + beamVec: np.ndarray = constants.beam_vec, +) -> tuple[np.ndarray, np.ndarray, np.ndarray]: """ utility routine for projecting a list of (tth, eta, ome) onto the detector plane parameterized by the args """ - gVec_cs = xfcapi.angles_to_gvec(allAngs, - chi=chi, - rmat_c=rMat_c, - beam_vec=beamVec) + gVec_cs = xfcapi.angles_to_gvec( + allAngs, chi=chi, rmat_c=rMat_c, beam_vec=beamVec + ) rMat_ss = xfcapi.make_sample_rmat(chi, allAngs[:, 2]) tmp_xys = xfcapi.gvec_to_xy( - gVec_cs, rMat_d, rMat_ss, rMat_c, - tVec_d, tVec_s, tVec_c, - beam_vec=beamVec) + gVec_cs, + rMat_d, + rMat_ss, + rMat_c, + tVec_d, + tVec_s, + tVec_c, + beam_vec=beamVec, + ) valid_mask = ~(np.isnan(tmp_xys[:, 0]) | np.isnan(tmp_xys[:, 1])) @@ -712,43 +677,45 @@ def _project_on_detector_plane(allAngs, return det_xy, rMat_ss, valid_mask -def _project_on_detector_cylinder(allAngs, - chi, - tVec_d, - caxis, - paxis, - radius, - physical_size, - angle_extent, - distortion, - beamVec=constants.beam_vec, - etaVec=constants.eta_vec, - tVec_s=constants.zeros_3x1, - rmat_s=constants.identity_3x3, - tVec_c=constants.zeros_3x1): +def _project_on_detector_cylinder( + allAngs: np.ndarray, + chi: float, + tVec_d: np.ndarray, + caxis: np.ndarray, + paxis: np.ndarray, + radius: float, + physical_size: np.ndarray, + angle_extent: float, + distortion: DistortionABC = None, + beamVec: np.ndarray = constants.beam_vec, + etaVec: np.ndarray = constants.eta_vec, + tVec_s: np.ndarray = constants.zeros_3x1, + rmat_s: np.ndarray = constants.identity_3x3, + tVec_c: 
np.ndarray = constants.zeros_3x1, +) -> tuple[np.ndarray, np.ndarray, np.ndarray]: """ utility routine for projecting a list of (tth, eta, ome) onto the detector plane parameterized by the args. this function does the computation for a cylindrical detector """ - dVec_cs = xfcapi.anglesToDVec(allAngs, - chi=chi, - rMat_c=np.eye(3), - bHat_l=beamVec, - eHat_l=etaVec) + dVec_cs = xfcapi.anglesToDVec( + allAngs, chi=chi, rMat_c=np.eye(3), bHat_l=beamVec, eHat_l=etaVec + ) rMat_ss = np.tile(rmat_s, [allAngs.shape[0], 1, 1]) - tmp_xys, valid_mask = _dvecToDetectorXYcylinder(dVec_cs, - tVec_d, - caxis, - paxis, - radius, - physical_size, - angle_extent, - tVec_s=tVec_s, - rmat_s=rmat_s, - tVec_c=tVec_c) + tmp_xys, valid_mask = _dvecToDetectorXYcylinder( + dVec_cs, + tVec_d, + caxis, + paxis, + radius, + physical_size, + angle_extent, + tVec_s=tVec_s, + rmat_s=rmat_s, + tVec_c=tVec_c, + ) det_xy = np.atleast_2d(tmp_xys[valid_mask, :]) @@ -758,56 +725,68 @@ def _project_on_detector_cylinder(allAngs, return det_xy, rMat_ss, valid_mask -def _dvecToDetectorXYcylinder(dVec_cs, - tVec_d, - caxis, - paxis, - radius, - physical_size, - angle_extent, - tVec_s=constants.zeros_3x1, - tVec_c=constants.zeros_3x1, - rmat_s=constants.identity_3x3): - - cvec = _unitvec_to_cylinder(dVec_cs, - caxis, - paxis, - radius, - tVec_d, - tVec_s=tVec_s, - tVec_c=tVec_c, - rmat_s=rmat_s) - - cvec_det, valid_mask = _clip_to_cylindrical_detector(cvec, - tVec_d, - caxis, - paxis, - radius, - physical_size, - angle_extent, - tVec_s=tVec_s, - tVec_c=tVec_c, - rmat_s=rmat_s) - - xy_det = _dewarp_from_cylinder(cvec_det, - tVec_d, - caxis, - paxis, - radius, - tVec_s=tVec_s, - tVec_c=tVec_c, - rmat_s=rmat_s) + +def _dvecToDetectorXYcylinder( + dVec_cs: np.ndarray, + tVec_d: np.ndarray, + caxis: np.ndarray, + paxis: np.ndarray, + radius: float, + physical_size: np.ndarray, + angle_extent: float, + tVec_s: np.ndarray = constants.zeros_3x1, + tVec_c: np.ndarray = constants.zeros_3x1, + rmat_s: np.ndarray = constants.identity_3x3, +) -> tuple[np.ndarray, np.ndarray]: + + cvec = _unitvec_to_cylinder( + dVec_cs, + caxis, + paxis, + radius, + tVec_d, + tVec_s=tVec_s, + tVec_c=tVec_c, + rmat_s=rmat_s, + ) + + cvec_det, valid_mask = _clip_to_cylindrical_detector( + cvec, + tVec_d, + caxis, + paxis, + radius, + physical_size, + angle_extent, + tVec_s=tVec_s, + tVec_c=tVec_c, + rmat_s=rmat_s, + ) + + xy_det = _dewarp_from_cylinder( + cvec_det, + tVec_d, + caxis, + paxis, + radius, + tVec_s=tVec_s, + tVec_c=tVec_c, + rmat_s=rmat_s, + ) return xy_det, valid_mask -def _unitvec_to_cylinder(uvw, - caxis, - paxis, - radius, - tvec, - tVec_s=constants.zeros_3x1, - tVec_c=constants.zeros_3x1, - rmat_s=constants.identity_3x3): + +def _unitvec_to_cylinder( + uvw: np.ndarray, + caxis: np.ndarray, + paxis: np.ndarray, + radius: float, + tvec: np.ndarray, + tVec_s: np.ndarray = constants.zeros_3x1, + tVec_c: np.ndarray = constants.zeros_3x1, + rmat_s: np.ndarray = constants.identity_3x3, +) -> np.ndarray: """ get point where unitvector uvw intersect the cylindrical detector. 
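Review note: `_unitvec_to_cylinder` reduces the ray/cylinder intersection to a quadratic in the path length beta; the hunk below carries its coefficients A, B, C and keeps the '+' root. A self-contained check of that algebra, with the geometry simplified so the cylinder axis passes through a single point `c0` (playing the role of hexrd's `delta`; names are illustrative):

```python
import numpy as np

def ray_cylinder_beta(uvw, caxis, c0, radius):
    # for p = beta * uvw from the origin on a cylinder of radius R with
    # unit axis caxis through c0: |p - c0|^2 - ((p - c0).caxis)^2 = R^2
    # expands to A beta^2 - 2 B beta - C = 0 with the coefficients below
    t2 = uvw @ caxis
    t3 = c0 @ caxis
    A = 1.0 - t2**2
    B = uvw @ c0 - t2 * t3
    C = radius**2 - c0 @ c0 + t3**2
    return (B + np.sqrt(B**2 + A * C)) / A  # '+' root: far-side intersection

uvw = np.array([1.0, 0.0, 0.0])    # ray along +x
caxis = np.array([0.0, 0.0, 1.0])  # cylinder axis along z
c0 = np.array([2.0, 0.0, 0.0])     # axis passes through (2, 0, 0)
beta = ray_cylinder_beta(uvw, caxis, c0, 1.0)
p = beta * uvw
radial = (p - c0) - ((p - c0) @ caxis) * caxis
print(beta, np.linalg.norm(radial))  # 3.0, and the point sits at radius 1.0
```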
@@ -827,17 +806,15 @@ def _unitvec_to_cylinder(uvw, the cylinder with (nx3) shape """ naxis = np.cross(caxis, paxis) - naxis = naxis/np.linalg.norm(naxis) + naxis = naxis / np.linalg.norm(naxis) tvec_c_l = np.dot(rmat_s, tVec_c) - delta = tvec - (radius*naxis + - np.squeeze(tVec_s) + - np.squeeze(tvec_c_l)) + delta = tvec - (radius * naxis + np.squeeze(tVec_s) + np.squeeze(tvec_c_l)) num = uvw.shape[0] cx = np.atleast_2d(caxis).T - delta_t = np.tile(delta,[num,1]) + delta_t = np.tile(delta, [num, 1]) t1 = np.dot(uvw, delta.T) t2 = np.squeeze(np.dot(uvw, cx)) @@ -845,29 +822,34 @@ def _unitvec_to_cylinder(uvw, t4 = np.dot(uvw, cx) A = np.squeeze(1 - t4**2) - B = t1 - t2*t3 - C = radius**2 - np.linalg.norm(delta)**2 + t3**2 - - mask = np.abs(A) < 1E-10 - beta = np.zeros([num, ]) + B = t1 - t2 * t3 + C = radius**2 - np.linalg.norm(delta) ** 2 + t3**2 + + mask = np.abs(A) < 1e-10 + beta = np.zeros( + [ + num, + ] + ) - beta[~mask] = (B[~mask] + - np.sqrt(B[~mask]**2 + - A[~mask]*C))/A[~mask] + beta[~mask] = (B[~mask] + np.sqrt(B[~mask] ** 2 + A[~mask] * C)) / A[~mask] beta[mask] = np.nan return np.tile(beta, [3, 1]).T * uvw -def _clip_to_cylindrical_detector(uvw, - tVec_d, - caxis, - paxis, - radius, - physical_size, - angle_extent, - tVec_s=constants.zeros_3x1, - tVec_c=constants.zeros_3x1, - rmat_s=constants.identity_3x3): + +def _clip_to_cylindrical_detector( + uvw: np.ndarray, + tVec_d: np.ndarray, + caxis: np.ndarray, + paxis: np.ndarray, + radius: float, + physical_size: np.ndarray, + angle_extent: float, + tVec_s: np.ndarray = constants.zeros_3x1, + tVec_c: np.ndarray = constants.zeros_3x1, + rmat_s: np.ndarray = constants.identity_3x3, +) -> tuple[np.ndarray, np.ndarray]: """ takes in the intersection points uvw with the cylindrical detector and @@ -895,52 +877,55 @@ def _clip_to_cylindrical_detector(uvw, tvec_c_l = np.dot(rmat_s, tVec_c) - delta = tVec_d - (radius*naxis + - np.squeeze(tVec_s) + - np.squeeze(tvec_c_l)) + delta = tVec_d - ( + radius * naxis + np.squeeze(tVec_s) + np.squeeze(tvec_c_l) + ) - delta_t = np.tile(delta,[num,1]) + delta_t = np.tile(delta, [num, 1]) uvwp = uvw - delta_t dp = np.dot(uvwp, cx) - uvwpxy = uvwp - np.tile(dp,[1,3])*np.tile(cx,[1,num]).T + uvwpxy = uvwp - np.tile(dp, [1, 3]) * np.tile(cx, [1, num]).T size = physical_size tvec = np.atleast_2d(tVec_d).T # ycomp = uvwp - np.tile(tVec_d,[num, 1]) - mask1 = np.squeeze(np.abs(dp) > size[0]*0.5) - uvwp[mask1,:] = np.nan + mask1 = np.squeeze(np.abs(dp) > size[0] * 0.5) + uvwp[mask1, :] = np.nan # next get rid of points that fall outside # the polar angle range - ang = np.dot(uvwpxy, nx)/radius - ang[np.abs(ang)>1.] 
= np.sign(ang[np.abs(ang)>1.]) + ang = np.dot(uvwpxy, nx) / radius + ang[np.abs(ang) > 1.0] = np.sign(ang[np.abs(ang) > 1.0]) ang = np.arccos(ang) mask2 = np.squeeze(ang >= angle_extent) mask = np.logical_or(mask1, mask2) res = uvw.copy() - res[mask,:] = np.nan + res[mask, :] = np.nan return res, ~mask -def _dewarp_from_cylinder(uvw, - tVec_d, - caxis, - paxis, - radius, - tVec_s=constants.zeros_3x1, - tVec_c=constants.zeros_3x1, - rmat_s=constants.identity_3x3): + +def _dewarp_from_cylinder( + uvw: np.ndarray, + tVec_d: np.ndarray, + caxis: np.ndarray, + paxis: np.ndarray, + radius: float, + tVec_s: np.ndarray = constants.zeros_3x1, + tVec_c: np.ndarray = constants.zeros_3x1, + rmat_s: np.ndarray = constants.identity_3x3, +): """ routine to convert cylindrical coordinates to cartesian coordinates in image frame """ naxis = np.cross(caxis, paxis) - naxis = naxis/np.linalg.norm(naxis) + naxis = naxis / np.linalg.norm(naxis) cx = np.atleast_2d(caxis).T px = np.atleast_2d(paxis).T @@ -949,34 +934,37 @@ def _dewarp_from_cylinder(uvw, tvec_c_l = np.dot(rmat_s, tVec_c) - delta = tVec_d - (radius*naxis + - np.squeeze(tVec_s) + - np.squeeze(tvec_c_l)) + delta = tVec_d - ( + radius * naxis + np.squeeze(tVec_s) + np.squeeze(tvec_c_l) + ) - delta_t = np.tile(delta,[num,1]) + delta_t = np.tile(delta, [num, 1]) uvwp = uvw - delta_t - uvwpxy = uvwp - np.tile(np.dot(uvwp, cx), [1, 3]) * \ - np.tile(cx, [1, num]).T + uvwpxy = uvwp - np.tile(np.dot(uvwp, cx), [1, 3]) * np.tile(cx, [1, num]).T - sgn = np.sign(np.dot(uvwpxy, px)); sgn[sgn==0.] = 1. - ang = np.dot(uvwpxy, nx)/radius - ang[np.abs(ang) > 1.] = np.sign(ang[np.abs(ang)>1.]) + sgn = np.sign(np.dot(uvwpxy, px)) + sgn[sgn == 0.0] = 1.0 + ang = np.dot(uvwpxy, nx) / radius + ang[np.abs(ang) > 1.0] = np.sign(ang[np.abs(ang) > 1.0]) ang = np.arccos(ang) - xcrd = np.squeeze(radius*ang*sgn) + xcrd = np.squeeze(radius * ang * sgn) ycrd = np.squeeze(np.dot(uvwp, cx)) return np.vstack((xcrd, ycrd)).T -def _warp_to_cylinder(cart, - tVec_d, - radius, - caxis, - paxis, - tVec_s=constants.zeros_3x1, - rmat_s=constants.identity_3x3, - tVec_c=constants.zeros_3x1, - normalize=True): + +def _warp_to_cylinder( + cart: np.ndarray, + tVec_d: np.ndarray, + radius: float, + caxis: np.ndarray, + paxis: np.ndarray, + tVec_s: np.ndarray = constants.zeros_3x1, + rmat_s: np.ndarray = constants.identity_3x3, + tVec_c: np.ndarray = constants.zeros_3x1, + normalize: bool = True, +) -> np.ndarray: """ routine to convert cartesian coordinates in image frame to cylindrical coordinates @@ -988,10 +976,11 @@ def _warp_to_cylinder(cart, tVec_c = np.atleast_2d(tVec_c).T num = cart.shape[0] naxis = np.cross(paxis, caxis) - x = cart[:,0]; y = cart[:,1] - th = x/radius - xp = radius*np.sin(th) - xn = radius*(1-np.cos(th)) + x = cart[:, 0] + y = cart[:, 1] + th = x / radius + xp = radius * np.sin(th) + xn = radius * (1 - np.cos(th)) ccomp = np.tile(y, [3, 1]).T * np.tile(caxis, [num, 1]) pcomp = np.tile(xp, [3, 1]).T * np.tile(paxis, [num, 1]) @@ -1000,14 +989,17 @@ def _warp_to_cylinder(cart, tVec_c_l = np.dot(rmat_s, tVec_c) - res = cart3d + np.tile(tvec-tVec_s-tVec_c_l, [1, num]).T + res = cart3d + np.tile(tvec - tVec_s - tVec_c_l, [1, num]).T if normalize: - return res/np.tile(np.linalg.norm(res, axis=1), [3, 1]).T + return res / np.tile(np.linalg.norm(res, axis=1), [3, 1]).T else: return res -def _dvec_to_angs(dvecs, bvec, evec): + +def _dvec_to_angs( + dvecs: np.ndarray, bvec: np.ndarray, evec: np.ndarray +) -> tuple[np.ndarray, np.ndarray]: """ convert diffraction vectors to (tth, 
eta) angles in the 'eta' frame @@ -1015,12 +1007,12 @@ def _dvec_to_angs(dvecs, bvec, evec): """ num = dvecs.shape[0] exb = np.cross(evec, bvec) - exb = exb/np.linalg.norm(exb) + exb = exb / np.linalg.norm(exb) bxexb = np.cross(bvec, exb) - bxexb = bxexb/np.linalg.norm(bxexb) + bxexb = bxexb / np.linalg.norm(bxexb) dp = np.dot(bvec, dvecs.T) - dp[np.abs(dp) > 1.] = np.sign(dp[np.abs(dp) > 1.]) + dp[np.abs(dp) > 1.0] = np.sign(dp[np.abs(dp) > 1.0]) tth = np.arccos(dp) dvecs_p = dvecs - np.tile(dp, [3, 1]).T * np.tile(bvec, [num, 1]) @@ -1029,15 +1021,24 @@ def _dvec_to_angs(dvecs, bvec, evec): dpy = np.dot(exb, dvecs_p.T) eta = np.arctan2(dpy, dpx) - return (tth, eta) - -def simulateGVecs(pd, detector_params, grain_params, - ome_range=[(-np.pi, np.pi), ], - ome_period=(-np.pi, np.pi), - eta_range=[(-np.pi, np.pi), ], - panel_dims=[(-204.8, -204.8), (204.8, 204.8)], - pixel_pitch=(0.2, 0.2), - distortion=None): + return tth, eta + + +def simulateGVecs( + pd: PlaneData, + detector_params: np.ndarray, + grain_params: np.ndarray, + ome_range: list[tuple[float]] = [ + (-np.pi, np.pi), + ], + ome_period: tuple[float] = (-np.pi, np.pi), + eta_range: list[tuple[float]] = [ + (-np.pi, np.pi), + ], + panel_dims: list[tuple[float]] = [(-204.8, -204.8), (204.8, 204.8)], + pixel_pitch: tuple[float] = (0.2, 0.2), + distortion: DistortionABC = None, +) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]: """ returns valid_ids, valid_hkl, valid_ang, valid_xy, ang_ps @@ -1087,11 +1088,11 @@ def simulateGVecs(pd, detector_params, grain_params, angList = np.vstack( xfcapi.oscillAnglesOfHKLs( full_hkls[:, 1:], chi, rMat_c, bMat, wlen, vInv=vInv_s - ) ) + ) allAngs, allHKLs = _filter_hkls_eta_ome( full_hkls, angList, eta_range, ome_range - ) + ) if len(allAngs) == 0: valid_ids = [] @@ -1101,46 +1102,57 @@ def simulateGVecs(pd, detector_params, grain_params, ang_ps = [] else: # ??? preallocate for speed? 
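Review note: after projection, `simulateGVecs` keeps only points inside the rectangular panel; the combined x/y bounds test applied just below reduces, in isolation, to a pair of interval checks against `panel_dims = [(xmin, ymin), (xmax, ymax)]` (mm, matching the function's default):

```python
import numpy as np

panel_dims = [(-204.8, -204.8), (204.8, 204.8)]
det_xy = np.array([[0.0, 10.0],       # on the panel
                   [300.0, 0.0],      # off in x
                   [-10.0, -205.0]])  # off in y

on_panel = (
    (det_xy[:, 0] >= panel_dims[0][0]) & (det_xy[:, 0] <= panel_dims[1][0])
    & (det_xy[:, 1] >= panel_dims[0][1]) & (det_xy[:, 1] <= panel_dims[1][1])
)
print(np.where(on_panel)[0])  # [0] -- only the first point survives the cull
```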
- det_xy, rMat_s, on_plane = _project_on_detector_plane( - allAngs, - rMat_d, rMat_c, chi, - tVec_d, tVec_c, tVec_s, - distortion - ) - # - on_panel_x = np.logical_and( - det_xy[:, 0] >= panel_dims[0][0], - det_xy[:, 0] <= panel_dims[1][0] - ) - on_panel_y = np.logical_and( - det_xy[:, 1] >= panel_dims[0][1], - det_xy[:, 1] <= panel_dims[1][1] - ) - on_panel = np.logical_and(on_panel_x, on_panel_y) - # + det_xy, rMat_s, _ = _project_on_detector_plane( + allAngs, rMat_d, rMat_c, chi, tVec_d, tVec_c, tVec_s, distortion + ) + + on_panel = np.logical_and( + np.logical_and( + det_xy[:, 0] >= panel_dims[0][0], + det_xy[:, 0] <= panel_dims[1][0], + ), + np.logical_and( + det_xy[:, 1] >= panel_dims[0][1], + det_xy[:, 1] <= panel_dims[1][1], + ), + ) + op_idx = np.where(on_panel)[0] - # + valid_ang = allAngs[op_idx, :] valid_ang[:, 2] = xfcapi.mapAngle(valid_ang[:, 2], ome_period) valid_ids = allHKLs[op_idx, 0] valid_hkl = allHKLs[op_idx, 1:] valid_xy = det_xy[op_idx, :] - ang_ps = angularPixelSize(valid_xy, pixel_pitch, - rMat_d, rMat_s, - tVec_d, tVec_s, tVec_c, - distortion=distortion) + ang_ps = angularPixelSize( + valid_xy, + pixel_pitch, + rMat_d, + rMat_s, + tVec_d, + tVec_s, + tVec_c, + distortion=distortion, + ) return valid_ids, valid_hkl, valid_ang, valid_xy, ang_ps -def simulateLauePattern(hkls, bMat, - rmat_d, tvec_d, - panel_dims, panel_buffer=5, - minEnergy=8, maxEnergy=24, - rmat_s=np.eye(3), - grain_params=None, - distortion=None, - beamVec=None): +@deprecated(new_func=simlp, removal_date='2025-01-01') +def simulateLauePattern( + hkls, + bMat, + rmat_d, + tvec_d, + panel_dims, + panel_buffer=5, + minEnergy=8, + maxEnergy=24, + rmat_s=np.eye(3), + grain_params=None, + distortion=None, + beamVec=None, +): if beamVec is None: beamVec = xfcapi.bVec_ref @@ -1148,14 +1160,12 @@ def simulateLauePattern(hkls, bMat, # parse energy ranges multipleEnergyRanges = False if hasattr(maxEnergy, '__len__'): - assert len(maxEnergy) == len(minEnergy), \ - 'energy cutoff ranges must have the same length' + assert len(maxEnergy) == len( + minEnergy + ), 'energy cutoff ranges must have the same length' multipleEnergyRanges = True - lmin = [] - lmax = [] - for i in range(len(maxEnergy)): - lmin.append(processWavelength(maxEnergy[i])) - lmax.append(processWavelength(minEnergy[i])) + lmin = [processWavelength(e) for e in maxEnergy] + lmax = [processWavelength(e) for e in minEnergy] else: lmin = processWavelength(maxEnergy) lmax = processWavelength(minEnergy) @@ -1163,10 +1173,7 @@ def simulateLauePattern(hkls, bMat, # process crystal rmats and inverse stretches if grain_params is None: grain_params = np.atleast_2d( - [0., 0., 0., - 0., 0., 0., - 1., 1., 1., 0., 0., 0. 
- ] + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0] ) n_grains = len(grain_params) @@ -1181,11 +1188,11 @@ def simulateLauePattern(hkls, bMat, ghat_c = mutil.unitVector(np.dot(bMat, hkls)) # pre-allocate output arrays - xy_det = np.nan*np.ones((n_grains, nhkls_tot, 2)) - hkls_in = np.nan*np.ones((n_grains, 3, nhkls_tot)) - angles = np.nan*np.ones((n_grains, nhkls_tot, 2)) - dspacing = np.nan*np.ones((n_grains, nhkls_tot)) - energy = np.nan*np.ones((n_grains, nhkls_tot)) + xy_det = np.nan * np.ones((n_grains, nhkls_tot, 2)) + hkls_in = np.nan * np.ones((n_grains, 3, nhkls_tot)) + angles = np.nan * np.ones((n_grains, nhkls_tot, 2)) + dspacing = np.nan * np.ones((n_grains, nhkls_tot)) + energy = np.nan * np.ones((n_grains, nhkls_tot)) """ LOOP OVER GRAINS @@ -1197,16 +1204,20 @@ def simulateLauePattern(hkls, bMat, vInv_s = mutil.vecMVToSymm(gp[6:].reshape(6, 1)) # stretch them: V^(-1) * R * Gc - ghat_s_str = mutil.unitVector( - np.dot(vInv_s, np.dot(rmat_c, ghat_c)) - ) + ghat_s_str = mutil.unitVector(np.dot(vInv_s, np.dot(rmat_c, ghat_c))) ghat_c_str = np.dot(rmat_c.T, ghat_s_str) # project - dpts = xfcapi.gvec_to_xy(ghat_c_str.T, - rmat_d, rmat_s, rmat_c, - tvec_d, tvec_s, tvec_c, - beam_vec=beamVec).T + dpts = xfcapi.gvec_to_xy( + ghat_c_str.T, + rmat_d, + rmat_s, + rmat_c, + tvec_d, + tvec_s, + tvec_c, + beam_vec=beamVec, + ).T # check intersections with detector plane canIntersect = ~np.isnan(dpts[0, :]) @@ -1218,10 +1229,8 @@ def simulateLauePattern(hkls, bMat, # back to angles tth_eta, gvec_l = xfcapi.detectorXYToGvec( - dpts.T, - rmat_d, rmat_s, - tvec_d, tvec_s, tvec_c, - beamVec=beamVec) + dpts.T, rmat_d, rmat_s, tvec_d, tvec_s, tvec_c, beamVec=beamVec + ) tth_eta = np.vstack(tth_eta).T # warp measured points @@ -1229,27 +1238,28 @@ def simulateLauePattern(hkls, bMat, dpts = distortion.apply_inverse(dpts) # plane spacings and energies - dsp = 1. 
/ mutil.columnNorm(np.dot(bMat, dhkl)) - wlen = 2*dsp*np.sin(0.5*tth_eta[:, 0]) + dsp = 1.0 / mutil.columnNorm(np.dot(bMat, dhkl)) + wlen = 2 * dsp * np.sin(0.5 * tth_eta[:, 0]) # find on spatial extent of detector xTest = np.logical_and( - dpts[0, :] >= -0.5*panel_dims[1] + panel_buffer, - dpts[0, :] <= 0.5*panel_dims[1] - panel_buffer) + dpts[0, :] >= -0.5 * panel_dims[1] + panel_buffer, + dpts[0, :] <= 0.5 * panel_dims[1] - panel_buffer, + ) yTest = np.logical_and( - dpts[1, :] >= -0.5*panel_dims[0] + panel_buffer, - dpts[1, :] <= 0.5*panel_dims[0] - panel_buffer) + dpts[1, :] >= -0.5 * panel_dims[0] + panel_buffer, + dpts[1, :] <= 0.5 * panel_dims[0] - panel_buffer, + ) onDetector = np.logical_and(xTest, yTest) if multipleEnergyRanges: validEnergy = np.zeros(len(wlen), dtype=bool) for i in range(len(lmin)): - validEnergy = validEnergy | \ - np.logical_and(wlen >= lmin[i], wlen <= lmax[i]) - pass + validEnergy = validEnergy | np.logical_and( + wlen >= lmin[i], wlen <= lmax[i] + ) else: validEnergy = np.logical_and(wlen >= lmin, wlen <= lmax) - pass # index for valid reflections keepers = np.where(np.logical_and(onDetector, validEnergy))[0] @@ -1260,180 +1270,116 @@ def simulateLauePattern(hkls, bMat, angles[iG][keepers, :] = tth_eta[keepers, :] dspacing[iG, keepers] = dsp[keepers] energy[iG, keepers] = processWavelength(wlen[keepers]) - pass - pass return xy_det, hkls_in, angles, dspacing, energy -if USE_NUMBA: - @numba.njit(nogil=True, cache=True) - def _expand_pixels(original, w, h, result): - hw = 0.5 * w - hh = 0.5 * h - for el in range(len(original)): - x, y = original[el, 0], original[el, 1] - result[el*4 + 0, 0] = x - hw - result[el*4 + 0, 1] = y - hh - result[el*4 + 1, 0] = x + hw - result[el*4 + 1, 1] = y - hh - result[el*4 + 2, 0] = x + hw - result[el*4 + 2, 1] = y + hh - result[el*4 + 3, 0] = x - hw - result[el*4 + 3, 1] = y + hh - - return result - - @numba.njit(nogil=True, cache=True) - def _compute_max(tth, eta, result): - period = 2.0 * np.pi - hperiod = np.pi - for el in range(0, len(tth), 4): - max_tth = np.abs(tth[el + 0] - tth[el + 3]) - eta_diff = eta[el + 0] - eta[el + 3] - max_eta = np.abs( +@numba.njit(nogil=True, cache=True) +def _expand_pixels( + original: np.ndarray, w: float, h: float, result: np.ndarray +) -> np.ndarray: + hw = 0.5 * w + hh = 0.5 * h + for el in range(len(original)): + x, y = original[el, 0], original[el, 1] + result[el * 4 + 0, 0] = x - hw + result[el * 4 + 0, 1] = y - hh + result[el * 4 + 1, 0] = x + hw + result[el * 4 + 1, 1] = y - hh + result[el * 4 + 2, 0] = x + hw + result[el * 4 + 2, 1] = y + hh + result[el * 4 + 3, 0] = x - hw + result[el * 4 + 3, 1] = y + hh + + return result + + +@numba.njit(nogil=True, cache=True) +def _compute_max( + tth: np.ndarray, eta: np.ndarray, result: np.ndarray +) -> np.ndarray: + period = 2.0 * np.pi + hperiod = np.pi + for el in range(0, len(tth), 4): + max_tth = np.abs(tth[el + 0] - tth[el + 3]) + eta_diff = eta[el + 0] - eta[el + 3] + max_eta = np.abs(np.remainder(eta_diff + hperiod, period) - hperiod) + for i in range(3): + curr_tth = np.abs(tth[el + i] - tth[el + i + 1]) + eta_diff = eta[el + i] - eta[el + i + 1] + curr_eta = np.abs( np.remainder(eta_diff + hperiod, period) - hperiod ) - for i in range(3): - curr_tth = np.abs(tth[el + i] - tth[el + i + 1]) - eta_diff = eta[el + i] - eta[el + i + 1] - curr_eta = np.abs( - np.remainder(eta_diff + hperiod, period) - hperiod - ) - max_tth = np.maximum(curr_tth, max_tth) - max_eta = np.maximum(curr_eta, max_eta) - result[el//4, 0] = max_tth - 
result[el//4, 1] = max_eta - - return result - - def angularPixelSize( - xy_det, xy_pixelPitch, - rMat_d, rMat_s, - tVec_d, tVec_s, tVec_c, - distortion=None, beamVec=None, etaVec=None): - """ - Calculate angular pixel sizes on a detector. - - * choices to beam vector and eta vector specs have been supressed - * assumes xy_det in UNWARPED configuration - """ - xy_det = np.atleast_2d(xy_det) - if distortion is not None: # !!! check this logic - xy_det = distortion.apply(xy_det) - if beamVec is None: - beamVec = xfcapi.bVec_ref - if etaVec is None: - etaVec = xfcapi.eta_ref - - xy_expanded = np.empty((len(xy_det) * 4, 2), dtype=xy_det.dtype) - xy_expanded = _expand_pixels( - xy_det, - xy_pixelPitch[0], xy_pixelPitch[1], - xy_expanded) - gvec_space, _ = xfcapi.detectorXYToGvec( - xy_expanded, - rMat_d, rMat_s, - tVec_d, tVec_s, tVec_c, - beamVec=beamVec, etaVec=etaVec) - result = np.empty_like(xy_det) - return _compute_max(gvec_space[0], gvec_space[1], result) -else: - def angularPixelSize(xy_det, xy_pixelPitch, - rMat_d, rMat_s, - tVec_d, tVec_s, tVec_c, - distortion=None, beamVec=None, etaVec=None): - """ - Calculate angular pixel sizes on a detector. - - * choices to beam vector and eta vector specs have been supressed - * assumes xy_det in UNWARPED configuration - """ - xy_det = np.atleast_2d(xy_det) - if distortion is not None: # !!! check this logic - xy_det = distortion.apply(xy_det) - if beamVec is None: - beamVec = xfcapi.bVec_ref - if etaVec is None: - etaVec = xfcapi.eta_ref - - xp = np.r_[-0.5, 0.5, 0.5, -0.5] * xy_pixelPitch[0] - yp = np.r_[-0.5, -0.5, 0.5, 0.5] * xy_pixelPitch[1] - - diffs = np.array([[3, 3, 2, 1], - [2, 0, 1, 0]]) - - ang_pix = np.zeros((len(xy_det), 2)) - - for ipt, xy in enumerate(xy_det): - xc = xp + xy[0] - yc = yp + xy[1] - - tth_eta, gHat_l = xfcapi.detectorXYToGvec( - np.vstack([xc, yc]).T, - rMat_d, rMat_s, - tVec_d, tVec_s, tVec_c, - beamVec=beamVec, etaVec=etaVec) - delta_tth = np.zeros(4) - delta_eta = np.zeros(4) - for j in range(4): - delta_tth[j] = abs( - tth_eta[0][diffs[0, j]] - tth_eta[0][diffs[1, j]] - ) - delta_eta[j] = xfcapi.angularDifference( - tth_eta[1][diffs[0, j]], tth_eta[1][diffs[1, j]] - ) - - ang_pix[ipt, 0] = np.amax(delta_tth) - ang_pix[ipt, 1] = np.amax(delta_eta) - return ang_pix - - -if USE_NUMBA: - @numba.njit(nogil=True, cache=True) - def _coo_build_window_jit(frame_row, frame_col, frame_data, - min_row, max_row, min_col, max_col, - result): - n = len(frame_row) - for i in range(n): - if ((min_row <= frame_row[i] <= max_row) and - (min_col <= frame_col[i] <= max_col)): - new_row = frame_row[i] - min_row - new_col = frame_col[i] - min_col - result[new_row, new_col] = frame_data[i] - - return result - - def _coo_build_window(frame_i, min_row, max_row, min_col, max_col): - window = np.zeros( - ((max_row - min_row + 1), (max_col - min_col + 1)), - dtype=np.int16 - ) - - return _coo_build_window_jit(frame_i.row, frame_i.col, frame_i.data, - min_row, max_row, min_col, max_col, - window) -else: # not USE_NUMBA - def _coo_build_window(frame_i, min_row, max_row, min_col, max_col): - mask = ((min_row <= frame_i.row) & (frame_i.row <= max_row) & - (min_col <= frame_i.col) & (frame_i.col <= max_col)) - new_row = frame_i.row[mask] - min_row - new_col = frame_i.col[mask] - min_col - new_data = frame_i.data[mask] - window = np.zeros( - ((max_row - min_row + 1), (max_col - min_col + 1)), - dtype=np.int16 - ) - window[new_row, new_col] = new_data - - return window + max_tth = np.maximum(curr_tth, max_tth) + max_eta = np.maximum(curr_eta, 
max_eta) + result[el // 4, 0] = max_tth + result[el // 4, 1] = max_eta + + return result + + +def angularPixelSize( + xy_det: np.ndarray, + xy_pixelPitch: tuple[float], + rMat_d: np.ndarray, + rMat_s: np.ndarray, + tVec_d: np.ndarray, + tVec_s: np.ndarray, + tVec_c: np.ndarray, + distortion: DistortionABC = None, + beamVec: np.ndarray = None, + etaVec: np.ndarray = None, +) -> np.ndarray: + """ + Calculate angular pixel sizes on a detector. + * choices to beam vector and eta vector specs have been supressed + * assumes xy_det in UNWARPED configuration + """ + xy_det = np.atleast_2d(xy_det) + if distortion is not None: # !!! check this logic + xy_det = distortion.apply(xy_det) + if beamVec is None: + beamVec = xfcapi.bVec_ref + if etaVec is None: + etaVec = xfcapi.eta_ref -def make_reflection_patches(instr_cfg, - tth_eta, ang_pixel_size, omega=None, - tth_tol=0.2, eta_tol=1.0, - rmat_c=np.eye(3), tvec_c=np.zeros((3, 1)), - npdiv=1, quiet=False, - compute_areas_func=gutil.compute_areas): + xy_expanded = np.empty((len(xy_det) * 4, 2), dtype=xy_det.dtype) + xy_expanded = _expand_pixels( + xy_det, xy_pixelPitch[0], xy_pixelPitch[1], xy_expanded + ) + gvec_space, _ = xfcapi.detectorXYToGvec( + xy_expanded, + rMat_d, + rMat_s, + tVec_d, + tVec_s, + tVec_c, + beamVec=beamVec, + etaVec=etaVec, + ) + result = np.empty_like(xy_det) + return _compute_max(gvec_space[0], gvec_space[1], result) + + +def make_reflection_patches( + instr_cfg: dict[str, Any], + tth_eta: np.ndarray, + ang_pixel_size: np.ndarray, + omega: Optional[np.ndarray] = None, + tth_tol: float = 0.2, + eta_tol: float = 1.0, + rmat_c: np.ndarray = np.eye(3), + tvec_c: np.ndarray = np.zeros((3, 1)), + npdiv: int = 1, + quiet: bool = False, # TODO: Remove this parameter - it isn't used + compute_areas_func: np.ndarray = gutil.compute_areas, +) -> Generator[ + tuple[ + np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray + ], + None, + None, +]: """Make angular patches on a detector. 
panel_dims are [(xmin, ymin), (xmax, ymax)] in mm @@ -1463,12 +1409,11 @@ def make_reflection_patches(instr_cfg, (x_center, y_center), (i_row, j_col) """ - npts = len(tth_eta) # detector quantities rmat_d = xfcapi.makeRotMatOfExpMap( np.r_[instr_cfg['detector']['transform']['tilt']] - ) + ) tvec_d = np.r_[instr_cfg['detector']['transform']['translation']] pixel_size = instr_cfg['detector']['pixels']['size'] @@ -1476,13 +1421,13 @@ def make_reflection_patches(instr_cfg, frame_ncols = instr_cfg['detector']['pixels']['columns'] panel_dims = ( - -0.5*np.r_[frame_ncols*pixel_size[1], frame_nrows*pixel_size[0]], - 0.5*np.r_[frame_ncols*pixel_size[1], frame_nrows*pixel_size[0]] - ) - row_edges = np.arange(frame_nrows + 1)[::-1]*pixel_size[1] \ - + panel_dims[0][1] - col_edges = np.arange(frame_ncols + 1)*pixel_size[0] \ - + panel_dims[0][0] + -0.5 * np.r_[frame_ncols * pixel_size[1], frame_nrows * pixel_size[0]], + 0.5 * np.r_[frame_ncols * pixel_size[1], frame_nrows * pixel_size[0]], + ) + row_edges = ( + np.arange(frame_nrows + 1)[::-1] * pixel_size[1] + panel_dims[0][1] + ) + col_edges = np.arange(frame_ncols + 1) * pixel_size[0] + panel_dims[0][0] # handle distortion distortion = None @@ -1492,51 +1437,40 @@ def make_reflection_patches(instr_cfg, try: func_name = distortion_cfg['function_name'] dparams = distortion_cfg['parameters'] - distortion = distortion_pkg.get_mapping( - func_name, dparams - ) - except(KeyError): - raise RuntimeError( - "problem with distortion specification" - ) + distortion = distortion_pkg.get_mapping(func_name, dparams) + except KeyError: + raise RuntimeError("problem with distortion specification") # sample frame chi = instr_cfg['oscillation_stage']['chi'] tvec_s = np.r_[instr_cfg['oscillation_stage']['translation']] - - # beam vector bvec = np.r_[instr_cfg['beam']['vector']] # data to loop # ??? WOULD IT BE CHEAPER TO CARRY ZEROS OR USE CONDITIONAL? if omega is None: - full_angs = np.hstack([tth_eta, np.zeros((npts, 1))]) + full_angs = np.hstack([tth_eta, np.zeros((len(tth_eta), 1))]) else: - full_angs = np.hstack([tth_eta, omega.reshape(npts, 1)]) + full_angs = np.hstack([tth_eta, omega.reshape(len(tth_eta), 1)]) - patches = [] for angs, pix in zip(full_angs, ang_pixel_size): # calculate bin edges for patch based on local angular pixel size # tth ntths, tth_edges = gutil.make_tolerance_grid( bin_width=np.degrees(pix[0]), window_width=tth_tol, - num_subdivisions=npdiv + num_subdivisions=npdiv, ) # eta netas, eta_edges = gutil.make_tolerance_grid( bin_width=np.degrees(pix[1]), window_width=eta_tol, - num_subdivisions=npdiv + num_subdivisions=npdiv, ) # FOR ANGULAR MESH - conn = gutil.cellConnectivity( - netas, - ntths, - origin='ll' - ) + conn = gutil.cellConnectivity(netas, ntths, origin='ll') # meshgrid args are (cols, rows), a.k.a (fast, slow) m_tth, m_eta = np.meshgrid(tth_edges, eta_edges) @@ -1545,59 +1479,78 @@ def make_reflection_patches(instr_cfg, # calculate the patch XY coords from the (tth, eta) angles # !!! 
will CHEAT and ignore the small perturbation the different # omega angle values causes and simply use the central value - gVec_angs_vtx = np.tile(angs, (npts_patch, 1)) \ - + np.radians(np.vstack([m_tth.flatten(), - m_eta.flatten(), - np.zeros(npts_patch)]).T) - - xy_eval_vtx, rmats_s, on_plane = _project_on_detector_plane( - gVec_angs_vtx, - rmat_d, rmat_c, - chi, - tvec_d, tvec_c, tvec_s, - distortion, - beamVec=bvec) + gVec_angs_vtx = np.tile(angs, (npts_patch, 1)) + np.radians( + np.vstack( + [m_tth.flatten(), m_eta.flatten(), np.zeros(npts_patch)] + ).T + ) + + xy_eval_vtx, _, _ = _project_on_detector_plane( + gVec_angs_vtx, + rmat_d, + rmat_c, + chi, + tvec_d, + tvec_c, + tvec_s, + distortion, + beamVec=bvec, + ) areas = compute_areas_func(xy_eval_vtx, conn) # EVALUATION POINTS # !!! for lack of a better option will use centroids tth_eta_cen = gutil.cellCentroids( - np.atleast_2d(gVec_angs_vtx[:, :2]), - conn + np.atleast_2d(gVec_angs_vtx[:, :2]), conn ) gVec_angs = np.hstack( - [tth_eta_cen, - np.tile(angs[2], (len(tth_eta_cen), 1))] + [tth_eta_cen, np.tile(angs[2], (len(tth_eta_cen), 1))] ) - xy_eval, rmats_s, on_plane = _project_on_detector_plane( - gVec_angs, - rmat_d, rmat_c, - chi, - tvec_d, tvec_c, tvec_s, - distortion, - beamVec=bvec) + xy_eval, _, _ = _project_on_detector_plane( + gVec_angs, + rmat_d, + rmat_c, + chi, + tvec_d, + tvec_c, + tvec_s, + distortion, + beamVec=bvec, + ) row_indices = gutil.cellIndices(row_edges, xy_eval[:, 1]) col_indices = gutil.cellIndices(col_edges, xy_eval[:, 0]) - yield( - ((gVec_angs_vtx[:, 0].reshape(m_tth.shape), - gVec_angs_vtx[:, 1].reshape(m_tth.shape)), - (xy_eval_vtx[:, 0].reshape(m_tth.shape), - xy_eval_vtx[:, 1].reshape(m_tth.shape)), - conn, - areas.reshape(netas, ntths), - (xy_eval[:, 0].reshape(netas, ntths), - xy_eval[:, 1].reshape(netas, ntths)), - (row_indices.reshape(netas, ntths), - col_indices.reshape(netas, ntths))) + yield ( + ( + ( + gVec_angs_vtx[:, 0].reshape(m_tth.shape), + gVec_angs_vtx[:, 1].reshape(m_tth.shape), + ), + ( + xy_eval_vtx[:, 0].reshape(m_tth.shape), + xy_eval_vtx[:, 1].reshape(m_tth.shape), + ), + conn, + areas.reshape(netas, ntths), + ( + xy_eval[:, 0].reshape(netas, ntths), + xy_eval[:, 1].reshape(netas, ntths), + ), + ( + row_indices.reshape(netas, ntths), + col_indices.reshape(netas, ntths), + ), + ) ) -def extract_detector_transformation(detector_params): +def extract_detector_transformation( + detector_params: Union[dict[str, Any], np.ndarray] +) -> tuple[np.ndarray, np.ndarray, float, np.ndarray]: """ Construct arrays from detector parameters. 
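Review note: one pre-existing wart survives the reformat in the hunk that follows. `assert len(detector_params >= 10)` evaluates `detector_params >= 10` elementwise (a boolean array) and then takes its length, so the assert passes for any non-empty input regardless of its actual size. The intended check is presumably the parenthesization below:

```python
import numpy as np

detector_params = np.zeros(12)

# buggy form kept by the reformat: len() of an elementwise boolean array,
# truthy for ANY non-empty detector_params
assert len(detector_params >= 10)

# intended check (assumed):
assert len(detector_params) >= 10, \
    "list of detector parameters must have length >= 10"
```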
@@ -1625,13 +1578,14 @@ def extract_detector_transformation(detector_params): if isinstance(detector_params, dict): rMat_d = xfcapi.makeRotMatOfExpMap( np.array(detector_params['detector']['transform']['tilt']) - ) + ) tVec_d = np.r_[detector_params['detector']['transform']['translation']] chi = detector_params['oscillation_stage']['chi'] tVec_s = np.r_[detector_params['oscillation_stage']['translation']] else: - assert len(detector_params >= 10), \ - "list of detector parameters must have length >= 10" + assert len( + detector_params >= 10 + ), "list of detector parameters must have length >= 10" rMat_d = xfcapi.makeRotMatOfExpMap(detector_params[:3]) tVec_d = np.ascontiguousarray(detector_params[3:6]) chi = detector_params[6] diff --git a/scripts/calibrate_from_powder.py b/scripts/calibrate_from_powder.py index fccf64140..6a94a47e0 100644 --- a/scripts/calibrate_from_powder.py +++ b/scripts/calibrate_from_powder.py @@ -225,9 +225,7 @@ def make_matl(mat_name, sgnum, lparms, hkl_ssq_max=50): tmp.append( np.hstack([xy_meas.squeeze(), tth_meas, tth0[i_ring], eta_ref]) ) - pass rhs[det_key].append(np.vstack(tmp)) - pass rhs[det_key] = np.array(rhs[det_key]) # %% plot fit poistions @@ -533,7 +531,6 @@ def multipanel_powder_objfunc(param_list, data_dict, instr): else: good_patch = [px, py] ax2.plot(px, py, 'm.', markersize=0.1) - pass ax2.plot(good_patch[0], good_patch[1], 'c.', markersize=0.1) aext = np.degrees( [np.min(rp[0][0]), diff --git a/scripts/calibrate_from_powder.py.bak b/scripts/calibrate_from_powder.py.bak index 6dfdc2cf2..1bc9cc24b 100644 --- a/scripts/calibrate_from_powder.py.bak +++ b/scripts/calibrate_from_powder.py.bak @@ -234,9 +234,7 @@ for det_key in det_keys: tmp.append( np.hstack([xy_meas.squeeze(), tth_meas, tth0[i_ring], eta_ref]) ) - pass rhs[det_key].append(np.vstack(tmp)) - pass rhs[det_key] = np.array(rhs[det_key]) # %% plot fit poistions @@ -528,7 +526,6 @@ while delta_r > 0.1: tmp.append( np.hstack([xy_meas.squeeze(), tth_meas, tth0[i_ring], eta_ref]) ) - pass rhs[det_key].append(np.vstack(tmp)) initial_guess = np.array(x0) @@ -633,7 +630,7 @@ for rp in rpatches: else: good_patch = [px, py] ax2.plot(px, py, 'm.', markersize=0.1) - pass + ax2.plot(good_patch[0], good_patch[1], 'c.', markersize=0.1) aext = np.degrees( [np.min(rp[0][0]), diff --git a/scripts/calibrate_from_rotation_series.py b/scripts/calibrate_from_rotation_series.py index db1dd0996..174233d9a 100644 --- a/scripts/calibrate_from_rotation_series.py +++ b/scripts/calibrate_from_rotation_series.py @@ -221,10 +221,7 @@ def calibrate_instrument_from_sx( panel.tilt = fit_params[ii:ii + 3] panel.tvec = fit_params[ii + 3:ii + 6] ii += 6 - # !!! use jj to do distortion... - if panel.distortion is not None: - pass - pass + # !!! use jj to do distortion? 
return fit_params, resd, sim_final @@ -274,7 +271,6 @@ def sxcal_obj_func(plist_fit, plist_full, param_flags, dparams = dparams_all[jj:jj + len_these_dps] jj += len_these_dps xy_unwarped[det_key] = dfunc(xy_unwarped[det_key], dparams) - pass meas_omes[det_key] = xyo_det[det_key][:, 2] # get these panel params for convenience @@ -316,7 +312,6 @@ def sxcal_obj_func(plist_fit, plist_full, param_flags, calc_xy[det_key] = calc_xy_tmp ii += 6 - pass # return values if sim_only: @@ -336,7 +331,6 @@ def sxcal_obj_func(plist_fit, plist_full, param_flags, calc_xy_all.append(calc_xy[det_key]) meas_omes_all.append(meas_omes[det_key]) calc_omes_all.append(calc_omes[det_key]) - pass meas_xy_all = np.vstack(meas_xy_all) calc_xy_all = np.vstack(calc_xy_all) meas_omes_all = np.hstack(meas_omes_all) @@ -404,7 +398,6 @@ def parse_reflection_tables(cfg, instr, grain_id, refit_idx=None): hkls[det_key] = gtable[idx, 2:5] meas_omes = gtable[idx, 12].reshape(sum(idx), 1) xyo_det[det_key] = np.hstack([gtable[idx, -2:], meas_omes]) - pass return hkls, xyo_det, idx_0 @@ -435,7 +428,6 @@ def parse_reflection_tables(cfg, instr, grain_id, refit_idx=None): max_pix_size = max(max_pix_size, max(panel.pixel_size_col, panel.pixel_size_col) ) - pass # grab omega period # !!! data should be consistent @@ -546,7 +538,6 @@ def parse_reflection_tables(cfg, instr, grain_id, refit_idx=None): # for det_key, panel in instr.detectors.iteritems(): # hkls_refit = hkls[det_key][idx_new[det_key], :] # xyo_det_refit = xyo_det[det_key][idx_0[det_key], :] -# pass # update calibration crystal params grain_parameters[:3] = params[5:8] diff --git a/scripts/calibration_mockup.py b/scripts/calibration_mockup.py index 38c4a2c81..a5065c3ed 100644 --- a/scripts/calibration_mockup.py +++ b/scripts/calibration_mockup.py @@ -280,11 +280,8 @@ def _extract_powder_lines(self): eta_ref] ) ) - pass rhs[det_key].append(np.vstack(tmp)) - pass rhs[det_key] = np.vstack(rhs[det_key]) - pass return rhs def residual(self, reduced_params, data_dict): @@ -428,7 +425,6 @@ def residual(self, reduced_params, data_dict): cf[ii + 2] = False cf[ii + 6:ii + 9] = True ii += 12 - pass instr.calibration_flags = cf # powder calibrator diff --git a/scripts/powder_calibration.py b/scripts/powder_calibration.py index d0ece0e34..5fac2db3f 100644 --- a/scripts/powder_calibration.py +++ b/scripts/powder_calibration.py @@ -219,11 +219,8 @@ def _extract_powder_lines(self): eta_ref] ) ) - pass rhs[det_key].append(np.vstack(tmp)) - pass rhs[det_key] = np.vstack(rhs[det_key]) - pass return rhs def residual(self, reduced_params, data_dict): diff --git a/scripts/tiffs_from_h5.py b/scripts/tiffs_from_h5.py index 95a47d457..b1c326851 100644 --- a/scripts/tiffs_from_h5.py +++ b/scripts/tiffs_from_h5.py @@ -62,4 +62,3 @@ os.path.join(image_dir, tiff_fname), ims[i] ) - pass diff --git a/tests/requirements-dev.txt b/tests/requirements-dev.txt index be86990b3..e9ca70e3c 100644 --- a/tests/requirements-dev.txt +++ b/tests/requirements-dev.txt @@ -1,2 +1,3 @@ pytest coloredlogs +pytest-codecov diff --git a/tests/test_matrix_utils.py b/tests/test_matrix_utils.py new file mode 100644 index 000000000..510eb0f25 --- /dev/null +++ b/tests/test_matrix_utils.py @@ -0,0 +1,53 @@ +import numpy as np + +from hexrd import matrixutil as mutil + + +def test_vec_mv_cob_matrix(): + np.random.seed(0) + # Generate some random matrices + R = np.random.rand(20, 3, 3) * 2 - 1 + + T = np.zeros((len(R), 6, 6), dtype='float64') + sqr2 = np.sqrt(2) + # Hardcoded implementation + T[:, 0, 0] = R[:, 0, 0]**2 + T[:, 0, 1] = 
R[:, 0, 1]**2 + T[:, 0, 2] = R[:, 0, 2]**2 + T[:, 0, 3] = sqr2 * R[:, 0, 1] * R[:, 0, 2] + T[:, 0, 4] = sqr2 * R[:, 0, 0] * R[:, 0, 2] + T[:, 0, 5] = sqr2 * R[:, 0, 0] * R[:, 0, 1] + T[:, 1, 0] = R[:, 1, 0]**2 + T[:, 1, 1] = R[:, 1, 1]**2 + T[:, 1, 2] = R[:, 1, 2]**2 + T[:, 1, 3] = sqr2 * R[:, 1, 1] * R[:, 1, 2] + T[:, 1, 4] = sqr2 * R[:, 1, 0] * R[:, 1, 2] + T[:, 1, 5] = sqr2 * R[:, 1, 0] * R[:, 1, 1] + T[:, 2, 0] = R[:, 2, 0]**2 + T[:, 2, 1] = R[:, 2, 1]**2 + T[:, 2, 2] = R[:, 2, 2]**2 + T[:, 2, 3] = sqr2 * R[:, 2, 1] * R[:, 2, 2] + T[:, 2, 4] = sqr2 * R[:, 2, 0] * R[:, 2, 2] + T[:, 2, 5] = sqr2 * R[:, 2, 0] * R[:, 2, 1] + T[:, 3, 0] = sqr2 * R[:, 1, 0] * R[:, 2, 0] + T[:, 3, 1] = sqr2 * R[:, 1, 1] * R[:, 2, 1] + T[:, 3, 2] = sqr2 * R[:, 1, 2] * R[:, 2, 2] + T[:, 3, 3] = R[:, 1, 2] * R[:, 2, 1] + R[:, 1, 1] * R[:, 2, 2] + T[:, 3, 4] = R[:, 1, 2] * R[:, 2, 0] + R[:, 1, 0] * R[:, 2, 2] + T[:, 3, 5] = R[:, 1, 1] * R[:, 2, 0] + R[:, 1, 0] * R[:, 2, 1] + T[:, 4, 0] = sqr2 * R[:, 0, 0] * R[:, 2, 0] + T[:, 4, 1] = sqr2 * R[:, 0, 1] * R[:, 2, 1] + T[:, 4, 2] = sqr2 * R[:, 0, 2] * R[:, 2, 2] + T[:, 4, 3] = R[:, 0, 2] * R[:, 2, 1] + R[:, 0, 1] * R[:, 2, 2] + T[:, 4, 4] = R[:, 0, 2] * R[:, 2, 0] + R[:, 0, 0] * R[:, 2, 2] + T[:, 4, 5] = R[:, 0, 1] * R[:, 2, 0] + R[:, 0, 0] * R[:, 2, 1] + T[:, 5, 0] = sqr2 * R[:, 0, 0] * R[:, 1, 0] + T[:, 5, 1] = sqr2 * R[:, 0, 1] * R[:, 1, 1] + T[:, 5, 2] = sqr2 * R[:, 0, 2] * R[:, 1, 2] + T[:, 5, 3] = R[:, 0, 2] * R[:, 1, 1] + R[:, 0, 1] * R[:, 1, 2] + T[:, 5, 4] = R[:, 0, 0] * R[:, 1, 2] + R[:, 0, 2] * R[:, 1, 0] + T[:, 5, 5] = R[:, 0, 1] * R[:, 1, 0] + R[:, 0, 0] * R[:, 1, 1] + + T2 = mutil.vecMVCOBMatrix(R) + + assert np.allclose(T, T2)
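Review note: the hardcoded 6x6 matrix above spells out, entry by entry, the change-of-basis operator for symmetric second-rank tensors in Mandel-style notation. A property-style companion check is possible in far fewer lines; the component order [S11, S22, S33, sqrt2*S23, sqrt2*S13, sqrt2*S12] is assumed from the sqrt(2) factors in the test, and `to_mandel` is a hypothetical helper, not hexrd API:

```python
import numpy as np

def to_mandel(S):
    # Mandel vector of a symmetric 3x3 tensor (assumed component order)
    s = np.sqrt(2.0)
    return np.array([S[0, 0], S[1, 1], S[2, 2],
                     s * S[1, 2], s * S[0, 2], s * S[0, 1]])

rng = np.random.default_rng(0)
R = rng.uniform(-1, 1, size=(3, 3))  # arbitrary, matching the test above
S = rng.uniform(-1, 1, size=(3, 3))
S = 0.5 * (S + S.T)                  # symmetric test tensor

# build T column by column from the orthonormal Mandel basis tensors; the
# defining property is vec(R @ S @ R.T) == T @ vec(S)
T = np.zeros((6, 6))
for j, (a, b) in enumerate([(0, 0), (1, 1), (2, 2), (1, 2), (0, 2), (0, 1)]):
    E = np.zeros((3, 3))
    E[a, b] = E[b, a] = 1.0
    if a != b:
        E /= np.sqrt(2.0)            # unit-norm shear basis tensor
    T[:, j] = to_mandel(R @ E @ R.T)

assert np.allclose(to_mandel(R @ S @ R.T), T @ to_mandel(S))
```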