From eb115a0b63371fb6e3ccd74e4a88fa8fe7f92ce7 Mon Sep 17 00:00:00 2001 From: Kevin Lewis Date: Thu, 4 Jul 2024 11:37:20 -0400 Subject: [PATCH] PEP8 fixes --- hexrd/distortion/dexela_2923.py | 1 + hexrd/gridutil.py | 1 + hexrd/indexer.py | 226 +++++--- hexrd/instrument/detector.py | 527 +++++++++++------- hexrd/sampleOrientations/conversions.py | 11 +- hexrd/sampleOrientations/rfz.py | 6 +- hexrd/transforms/xf.py | 489 ++++++++++------- hexrd/wppf/derivatives.py | 26 + hexrd/wppf/peakfunctions.py | 686 ++++++++++++------------ 9 files changed, 1140 insertions(+), 833 deletions(-) diff --git a/hexrd/distortion/dexela_2923.py b/hexrd/distortion/dexela_2923.py index 16703738e..9495448b5 100644 --- a/hexrd/distortion/dexela_2923.py +++ b/hexrd/distortion/dexela_2923.py @@ -86,6 +86,7 @@ def _dexela_2923_distortion(out_, in_, params): # 1st quadrant out_[el, :] = in_[el, :] + params[0:2] + @numba.njit(nogil=True, cache=True) def _dexela_2923_inverse_distortion(out_, in_, params): for el in range(len(in_)): diff --git a/hexrd/gridutil.py b/hexrd/gridutil.py index 4c52ab569..5958cdfe4 100644 --- a/hexrd/gridutil.py +++ b/hexrd/gridutil.py @@ -89,6 +89,7 @@ def cellIndices(edges, points_1d): # idx[off_hi] = np.nan return np.array(idx, dtype=int) + @numba.njit(nogil=True, cache=True) def _fill_connectivity(out, m, n, p): i_con = 0 diff --git a/hexrd/indexer.py b/hexrd/indexer.py index 3f2adfec6..b3c9ebb5c 100644 --- a/hexrd/indexer.py +++ b/hexrd/indexer.py @@ -43,7 +43,7 @@ # ============================================================================= # Parameters # ============================================================================= -omega_period_DFLT = np.radians(np.r_[-180., 180.]) +omega_period_DFLT = np.radians(np.r_[-180.0, 180.0]) paramMP = None nCPUs_DFLT = multiprocessing.cpu_count() @@ -53,13 +53,20 @@ # ============================================================================= # Methods # ============================================================================= -def paintGrid(quats, etaOmeMaps, - threshold=None, bMat=None, - omegaRange=None, etaRange=None, - omeTol=constants.d2r, etaTol=constants.d2r, - omePeriod=omega_period_DFLT, - doMultiProc=False, - nCPUs=None, debug=False): +def paintGrid( + quats, + etaOmeMaps, + threshold=None, + bMat=None, + omegaRange=None, + etaRange=None, + omeTol=constants.d2r, + etaTol=constants.d2r, + omePeriod=omega_period_DFLT, + doMultiProc=False, + nCPUs=None, + debug=False, +): r""" Spherical map-based indexing algorithm, i.e. paintGrid. @@ -156,10 +163,7 @@ def paintGrid(quats, etaOmeMaps, # !!! these are master hklIDs hklIDs = np.asarray(etaOmeMaps.iHKLList) hklList = planeData.getHKLs(*hklIDs).tolist() - hkl_idx = planeData.getHKLID( - planeData.getHKLs(*hklIDs).T, - master=False - ) + hkl_idx = planeData.getHKLID(planeData.getHKLs(*hklIDs).T, master=False) nHKLS = len(hklIDs) numEtas = len(etaOmeMaps.etaEdges) - 1 @@ -169,8 +173,10 @@ def paintGrid(quats, etaOmeMaps, threshold = np.zeros(nHKLS) for i in range(nHKLS): threshold[i] = np.mean( - np.r_[np.mean(etaOmeMaps.dataStore[i]), - np.median(etaOmeMaps.dataStore[i])] + np.r_[ + np.mean(etaOmeMaps.dataStore[i]), + np.median(etaOmeMaps.dataStore[i]), + ] ) elif threshold is not None and not hasattr(threshold, '__len__'): threshold = threshold * np.ones(nHKLS) @@ -182,7 +188,7 @@ def paintGrid(quats, etaOmeMaps, else: raise RuntimeError( "unknown threshold option. 
should be a list of numbers or None" - ) + ) if bMat is None: bMat = planeData.latVecOps['B'] @@ -194,14 +200,22 @@ def paintGrid(quats, etaOmeMaps, omeMin = None omeMax = None if omegaRange is None: # FIXME - omeMin = [np.min(etaOmeMaps.omeEdges), ] - omeMax = [np.max(etaOmeMaps.omeEdges), ] + omeMin = [ + np.min(etaOmeMaps.omeEdges), + ] + omeMax = [ + np.max(etaOmeMaps.omeEdges), + ] else: omeMin = [omegaRange[i][0] for i in range(len(omegaRange))] omeMax = [omegaRange[i][1] for i in range(len(omegaRange))] if omeMin is None: - omeMin = [-np.pi, ] - omeMax = [np.pi, ] + omeMin = [ + -np.pi, + ] + omeMax = [ + np.pi, + ] omeMin = np.asarray(omeMin) omeMax = np.asarray(omeMax) @@ -211,8 +225,12 @@ def paintGrid(quats, etaOmeMaps, etaMin = [etaRange[i][0] for i in range(len(etaRange))] etaMax = [etaRange[i][1] for i in range(len(etaRange))] if etaMin is None: - etaMin = [-np.pi, ] - etaMax = [np.pi, ] + etaMin = [ + -np.pi, + ] + etaMax = [ + np.pi, + ] etaMin = np.asarray(etaMin) etaMax = np.asarray(etaMax) @@ -223,8 +241,9 @@ def paintGrid(quats, etaOmeMaps, chunksize = min(quats.shape[1] // nCPUs, 10) logger.info( "using multiprocessing with %d processes and a chunk size of %d", - nCPUs, chunksize - ) + nCPUs, + chunksize, + ) else: logger.info("running in serial mode") nCPUs = 1 @@ -258,24 +277,24 @@ def paintGrid(quats, etaOmeMaps, 'etaEdges': etaOmeMaps.etaEdges, 'etaOmeMaps': np.stack(etaOmeMaps.dataStore), 'bMat': bMat, - 'threshold': np.asarray(threshold) - } + 'threshold': np.asarray(threshold), + } # do the mapping start = timeit.default_timer() retval = None if multiProcMode: # multiple process version - pool = multiprocessing.Pool(nCPUs, paintgrid_init, (params, )) + pool = multiprocessing.Pool(nCPUs, paintgrid_init, (params,)) retval = pool.map(paintGridThis, quats.T, chunksize=chunksize) pool.close() else: # single process version. global paramMP - paintgrid_init(params) # sets paramMP + paintgrid_init(params) # sets paramMP retval = list(map(paintGridThis, quats.T)) - paramMP = None # clear paramMP - elapsed = (timeit.default_timer() - start) + paramMP = None # clear paramMP + elapsed = timeit.default_timer() - start logger.info("paintGrid took %.3f seconds", elapsed) return retval @@ -356,18 +375,19 @@ def _normalize_ranges(starts, stops, offset, ccw=False): # return the full range two_pi = 2 * np.pi if np.any((starts + two_pi) < stops + 1e-8): - return np.array([offset, two_pi+offset]) + return np.array([offset, two_pi + offset]) starts = np.mod(starts - offset, two_pi) + offset stops = np.mod(stops - offset, two_pi) + offset order = np.argsort(starts) - result = np.hstack((starts[order, np.newaxis], - stops[order, np.newaxis])).ravel() + result = np.hstack( + (starts[order, np.newaxis], stops[order, np.newaxis]) + ).ravel() # at this point, result is in its final form unless there # is wrap-around in the last segment. Handle this case: if result[-1] < result[-2]: - new_result = np.empty((len(result)+2,), dtype=result.dtype) + new_result = np.empty((len(result) + 2,), dtype=result.dtype) new_result[0] = offset new_result[1] = result[-1] new_result[2:-1] = result[0:-1] @@ -402,13 +422,13 @@ def paintgrid_init(params): # instead of building etaMin/etaMax and omeMin/omeMax. It may also # be worth handling range overlap and maybe "optimize" ranges if # there happens to be contiguous spans. 
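For reference, the spans produced by _normalize_ranges are consumed by the binary search in _find_in_range below; membership is a parity test on the insertion index. A minimal standalone sketch of that rule (the span values here are made up, not hexrd output):

    import numpy as np

    # flattened, sorted [start0, stop0, start1, stop1], as _normalize_ranges returns
    spans = np.array([-np.pi, -2.0, 1.0, 2.5])

    def in_valid_span(value, spans):
        # inside a span exactly when the insertion index is odd;
        # np.searchsorted stands in for the numba _find_in_range helper
        return np.searchsorted(spans, value, side='right') % 2 == 1

    print(in_valid_span(-2.5, spans))  # True  (falls inside [-pi, -2.0])
    print(in_valid_span(0.0, spans))   # False (between the two spans)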
- paramMP['valid_eta_spans'] = _normalize_ranges(paramMP['etaMin'], - paramMP['etaMax'], - -np.pi) + paramMP['valid_eta_spans'] = _normalize_ranges( + paramMP['etaMin'], paramMP['etaMax'], -np.pi + ) - paramMP['valid_ome_spans'] = _normalize_ranges(paramMP['omeMin'], - paramMP['omeMax'], - min(paramMP['omePeriod'])) + paramMP['valid_ome_spans'] = _normalize_ranges( + paramMP['omeMin'], paramMP['omeMax'], min(paramMP['omePeriod']) + ) return @@ -443,11 +463,11 @@ def _check_dilated(eta, ome, dpix_eta, dpix_ome, etaOmeMap, threshold): i_max, j_max = etaOmeMap.shape ome_start, ome_stop = ( max(ome - dpix_ome, 0), - min(ome + dpix_ome + 1, i_max) + min(ome + dpix_ome + 1, i_max), ) eta_start, eta_stop = ( max(eta - dpix_eta, 0), - min(eta + dpix_eta + 1, j_max) + min(eta + dpix_eta + 1, j_max), ) for i in range(ome_start, ome_stop): @@ -505,14 +525,27 @@ def paintGridThis(quat): rMat = xfcapi.makeRotMatOfQuat(quat) # Compute the oscillation angles of all the symHKLs at once - oangs_pair = xfcapi.oscillAnglesOfHKLs(symHKLs, 0., rMat, bMat, - wavelength) + oangs_pair = xfcapi.oscillAnglesOfHKLs( + symHKLs, 0.0, rMat, bMat, wavelength + ) # pdb.set_trace() - return _filter_and_count_hits(oangs_pair[0], oangs_pair[1], symHKLs_ix, - etaEdges, valid_eta_spans, - valid_ome_spans, omeEdges, omePeriod, - etaOmeMaps, etaIndices, omeIndices, - dpix_eta, dpix_ome, threshold) + return _filter_and_count_hits( + oangs_pair[0], + oangs_pair[1], + symHKLs_ix, + etaEdges, + valid_eta_spans, + valid_ome_spans, + omeEdges, + omePeriod, + etaOmeMaps, + etaIndices, + omeIndices, + dpix_eta, + dpix_ome, + threshold, + ) + @numba.njit(nogil=True, cache=True) def _find_in_range(value, spans): @@ -541,14 +574,28 @@ def _find_in_range(value, spans): if value < spans[mi]: ri = mi else: - li = mi+1 + li = mi + 1 return li + @numba.njit(nogil=True, cache=True) -def _angle_is_hit(ang, eta_offset, ome_offset, hkl, valid_eta_spans, - valid_ome_spans, etaEdges, omeEdges, etaOmeMaps, - etaIndices, omeIndices, dpix_eta, dpix_ome, threshold): +def _angle_is_hit( + ang, + eta_offset, + ome_offset, + hkl, + valid_eta_spans, + valid_ome_spans, + etaEdges, + omeEdges, + etaOmeMaps, + etaIndices, + omeIndices, + dpix_eta, + dpix_ome, + threshold, +): """Perform work on one of the angles. This includes: @@ -598,18 +645,32 @@ def _angle_is_hit(ang, eta_offset, ome_offset, hkl, valid_eta_spans, eta = etaIndices[eta_idx] ome = omeIndices[ome_idx] - isHit = _check_dilated(eta, ome, dpix_eta, dpix_ome, - etaOmeMaps[hkl], threshold[hkl]) + isHit = _check_dilated( + eta, ome, dpix_eta, dpix_ome, etaOmeMaps[hkl], threshold[hkl] + ) if isHit == -1: return 0, 0 else: return isHit, 1 + @numba.njit(nogil=True, cache=True) -def _filter_and_count_hits(angs_0, angs_1, symHKLs_ix, etaEdges, - valid_eta_spans, valid_ome_spans, omeEdges, - omePeriod, etaOmeMaps, etaIndices, omeIndices, - dpix_eta, dpix_ome, threshold): +def _filter_and_count_hits( + angs_0, + angs_1, + symHKLs_ix, + etaEdges, + valid_eta_spans, + valid_ome_spans, + omeEdges, + omePeriod, + etaOmeMaps, + etaIndices, + omeIndices, + dpix_eta, + dpix_ome, + threshold, +): """Accumulate completeness scores. 
assumes:

@@ -633,33 +694,52 @@ def _filter_and_count_hits(angs_0, angs_1, symHKLs_ix, etaEdges,
     for i in range(count):
         if i >= end_curr:
             curr_hkl_idx += 1
-            end_curr = symHKLs_ix[curr_hkl_idx+1]
+            end_curr = symHKLs_ix[curr_hkl_idx + 1]
 
         # first solution
         hit, not_filtered = _angle_is_hit(
-            angs_0[i], eta_offset, ome_offset,
-            curr_hkl_idx, valid_eta_spans,
-            valid_ome_spans, etaEdges,
-            omeEdges, etaOmeMaps, etaIndices,
-            omeIndices, dpix_eta, dpix_ome,
-            threshold)
+            angs_0[i],
+            eta_offset,
+            ome_offset,
+            curr_hkl_idx,
+            valid_eta_spans,
+            valid_ome_spans,
+            etaEdges,
+            omeEdges,
+            etaOmeMaps,
+            etaIndices,
+            omeIndices,
+            dpix_eta,
+            dpix_ome,
+            threshold,
+        )
         hits += hit
         total += not_filtered
 
         # second solution
         hit, not_filtered = _angle_is_hit(
-            angs_1[i], eta_offset, ome_offset,
-            curr_hkl_idx, valid_eta_spans,
-            valid_ome_spans, etaEdges,
-            omeEdges, etaOmeMaps, etaIndices,
-            omeIndices, dpix_eta, dpix_ome,
-            threshold)
+            angs_1[i],
+            eta_offset,
+            ome_offset,
+            curr_hkl_idx,
+            valid_eta_spans,
+            valid_ome_spans,
+            etaEdges,
+            omeEdges,
+            etaOmeMaps,
+            etaIndices,
+            omeIndices,
+            dpix_eta,
+            dpix_ome,
+            threshold,
+        )
        hits += hit
        total += not_filtered
 
-    return float(hits)/float(total) if total != 0 else 0.0
+    return float(hits) / float(total) if total != 0 else 0.0
+
 
 @numba.njit(nogil=True, cache=True)
 def _map_angle(angle, offset):
     """Numba-friendly equivalent to xf.mapAngle."""
-    return np.mod(angle-offset, 2*np.pi)+offset
+    return np.mod(angle - offset, 2 * np.pi) + offset
diff --git a/hexrd/instrument/detector.py b/hexrd/instrument/detector.py
index 616c304b0..ab48b517e 100644
--- a/hexrd/instrument/detector.py
+++ b/hexrd/instrument/detector.py
@@ -30,10 +30,7 @@
 
 max_workers_DFLT = max(1, os.cpu_count() - 1)
 
-panel_calibration_flags_DFLT = np.array(
-    [1, 1, 1, 1, 1, 1],
-    dtype=bool
-)
+panel_calibration_flags_DFLT = np.array([1, 1, 1, 1, 1, 1], dtype=bool)
 
 beam_energy_DFLT = 65.351
 
@@ -50,6 +47,7 @@ class Detector:
     common to planar and cylindrical detectors. This class
     will be inherited by both those classes.
     """
+
     __pixelPitchUnit = 'mm'
 
     # Abstract methods that must be redefined in derived classes
@@ -59,8 +57,14 @@ def detector_type(self):
         pass
 
     @abstractmethod
-    def cart_to_angles(self, xy_data, rmat_s=None, tvec_s=None,
-                       tvec_c=None, apply_distortion=False):
+    def cart_to_angles(
+        self,
+        xy_data,
+        rmat_s=None,
+        tvec_s=None,
+        tvec_c=None,
+        apply_distortion=False,
+    ):
         """
         Transform cartesian coordinates to angular.
 
         Parameters
         ----------
         xy_data : TYPE
             DESCRIPTION.
         rmat_s : TYPE, optional
             DESCRIPTION. The default is None.
         tvec_s : TYPE, optional
             DESCRIPTION. The default is None.
         tvec_c : TYPE, optional
             DESCRIPTION. The default is None.
         apply_distortion : TYPE, optional
             DESCRIPTION. The default is False.
 
         Returns
         -------
         tth_eta : TYPE
             DESCRIPTION.
         gvec_l : TYPE
             DESCRIPTION.
 
         """
         pass
 
     @abstractmethod
-    def angles_to_cart(self, tth_eta,
-                       rmat_s=None, tvec_s=None,
-                       rmat_c=None, tvec_c=None,
-                       apply_distortion=False):
+    def angles_to_cart(
+        self,
+        tth_eta,
+        rmat_s=None,
+        tvec_s=None,
+        rmat_c=None,
+        tvec_c=None,
+        apply_distortion=False,
+    ):
         """
         Transform angular coordinates to cartesian.
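The cart_to_angles/angles_to_cart pair declared above is meant to round-trip. A quick sanity sketch, assuming the concrete PlanarDetector subclass and its default geometry (illustrative only, not part of this patch):

    import numpy as np
    from hexrd.instrument import PlanarDetector

    panel = PlanarDetector(rows=512, cols=512, pixel_size=(0.2, 0.2))
    tth_eta = np.radians([[2.5, 30.0]])   # one (two-theta, eta) pair
    xy = panel.angles_to_cart(tth_eta)    # detector-frame coordinates, mm
    angs, _ = panel.cart_to_angles(xy)    # back to angular coordinates
    print(np.allclose(angs, tth_eta))     # True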
@@ -159,21 +168,25 @@ def extra_config_kwargs(self): # End of abstract methods - def __init__(self, - rows=2048, cols=2048, - pixel_size=(0.2, 0.2), - tvec=np.r_[0., 0., -1000.], - tilt=ct.zeros_3, - name='default', - bvec=ct.beam_vec, - xrs_dist=None, - evec=ct.eta_vec, - saturation_level=None, - panel_buffer=None, - tth_distortion=None, - roi=None, group=None, - distortion=None, - max_workers=max_workers_DFLT): + def __init__( + self, + rows=2048, + cols=2048, + pixel_size=(0.2, 0.2), + tvec=np.r_[0.0, 0.0, -1000.0], + tilt=ct.zeros_3, + name='default', + bvec=ct.beam_vec, + xrs_dist=None, + evec=ct.eta_vec, + saturation_level=None, + panel_buffer=None, + tth_distortion=None, + roi=None, + group=None, + distortion=None, + max_workers=max_workers_DFLT, + ): """ Instantiate a PlanarDetector object. @@ -230,10 +243,11 @@ def __init__(self, if roi is None: self._roi = roi else: - assert len(roi) == 2, \ - "roi is set via (start_row, start_col)" - self._roi = ((roi[0], roi[0] + self._rows), - (roi[1], roi[1] + self._cols)) + assert len(roi) == 2, "roi is set via (start_row, start_col)" + self._roi = ( + (roi[0], roi[0] + self._rows), + (roi[1], roi[1] + self._cols), + ) self._tvec = np.array(tvec).flatten() self._tilt = np.array(tilt).flatten() @@ -259,12 +273,11 @@ def __init__(self, if self._distortion is not None: dparams = self._distortion.params self._calibration_parameters = np.hstack( - [self._tilt, self._tvec, dparams] - ) + [self._tilt, self._tvec, dparams] + ) self._calibration_flags = np.hstack( - [panel_calibration_flags_DFLT, - np.zeros(len(dparams), dtype=bool)] - ) + [panel_calibration_flags_DFLT, np.zeros(len(dparams), dtype=bool)] + ) # detector ID @property @@ -362,10 +375,13 @@ def roi(self, vertex_array): !!! vertex array must be (r0, c0) """ if vertex_array is not None: - assert len(vertex_array) == 2, \ - "roi is set via (start_row, start_col)" - self._roi = ((vertex_array[0], vertex_array[0] + self.rows), - (vertex_array[1], vertex_array[1] + self.cols)) + assert ( + len(vertex_array) == 2 + ), "roi is set via (start_row, start_col)" + self._roi = ( + (vertex_array[0], vertex_array[0] + self.rows), + (vertex_array[1], vertex_array[1] + self.cols), + ) @property def row_dim(self): @@ -377,7 +393,9 @@ def col_dim(self): @property def row_pixel_vec(self): - return self.pixel_size_row*(0.5*(self.rows-1)-np.arange(self.rows)) + return self.pixel_size_row * ( + 0.5 * (self.rows - 1) - np.arange(self.rows) + ) @property def row_edge_vec(self): @@ -385,7 +403,9 @@ def row_edge_vec(self): @property def col_pixel_vec(self): - return self.pixel_size_col*(np.arange(self.cols)-0.5*(self.cols-1)) + return self.pixel_size_col * ( + np.arange(self.cols) - 0.5 * (self.cols - 1) + ) @property def col_edge_vec(self): @@ -393,7 +413,7 @@ def col_edge_vec(self): @property def corner_ul(self): - return np.r_[-0.5 * self.col_dim, 0.5 * self.row_dim] + return np.r_[-0.5 * self.col_dim, 0.5 * self.row_dim] @property def corner_ll(self): @@ -405,7 +425,7 @@ def corner_lr(self): @property def corner_ur(self): - return np.r_[0.5 * self.col_dim, 0.5 * self.row_dim] + return np.r_[0.5 * self.col_dim, 0.5 * self.row_dim] @property def shape(self): @@ -437,8 +457,9 @@ def bvec(self): @bvec.setter def bvec(self, x): x = np.array(x).flatten() - assert len(x) == 3 and sum(x*x) > 1-ct.sqrt_epsf, \ - 'input must have length = 3 and have unit magnitude' + assert ( + len(x) == 3 and sum(x * x) > 1 - ct.sqrt_epsf + ), 'input must have length = 3 and have unit magnitude' self._bvec = x @property @@ -447,8 
+468,9 @@ def xrs_dist(self): @xrs_dist.setter def xrs_dist(self, x): - assert x is None or np.isscalar(x), \ - f"'source_distance' must be None or scalar; you input '{x}'" + assert x is None or np.isscalar( + x + ), f"'source_distance' must be None or scalar; you input '{x}'" self._xrs_dist = x @property @@ -458,8 +480,9 @@ def evec(self): @evec.setter def evec(self, x): x = np.array(x).flatten() - assert len(x) == 3 and sum(x*x) > 1-ct.sqrt_epsf, \ - 'input must have length = 3 and have unit magnitude' + assert ( + len(x) == 3 and sum(x * x) > 1 - ct.sqrt_epsf + ), 'input must have length = 3 and have unit magnitude' self._evec = x @property @@ -472,8 +495,7 @@ def distortion(self, x): check_arg = np.zeros(len(distortion_registry), dtype=bool) for i, dcls in enumerate(distortion_registry.values()): check_arg[i] = isinstance(x, dcls) - assert np.any(check_arg), \ - 'input distortion is not in registry!' + assert np.any(check_arg), 'input distortion is not in registry!' self._distortion = x @property @@ -488,8 +510,8 @@ def normal(self): @property def pixel_coords(self): pix_i, pix_j = np.meshgrid( - self.row_pixel_vec, self.col_pixel_vec, - indexing='ij') + self.row_pixel_vec, self.col_pixel_vec, indexing='ij' + ) return pix_i, pix_j @property @@ -504,8 +526,8 @@ def calibration_parameters(self): if self.distortion is not None: dparams = self.distortion.params self._calibration_parameters = np.hstack( - [self.tilt, self.tvec, dparams] - ) + [self.tilt, self.tvec, dparams] + ) return self._calibration_parameters @property @@ -571,14 +593,18 @@ def polarization_factor(self, f_hor, f_vert, unpolarized=False): """ s = f_hor + f_vert if np.abs(s - 1) > ct.sqrt_epsf: - msg = ("sum of fraction of " - "horizontal and vertical polarizations " - "must be equal to 1.") + msg = ( + "sum of fraction of " + "horizontal and vertical polarizations " + "must be equal to 1." + ) raise RuntimeError(msg) if f_hor < 0 or f_vert < 0: - msg = ("fraction of polarization in horizontal " - "or vertical directions can't be negative.") + msg = ( + "fraction of polarization in horizontal " + "or vertical directions can't be negative." + ) raise RuntimeError(msg) tth, eta = self.pixel_angles() @@ -614,9 +640,16 @@ def lorentz_factor(self): tth, eta = self.pixel_angles() return _lorentz_factor(tth) - def config_dict(self, chi=0, tvec=ct.zeros_3, - beam_energy=beam_energy_DFLT, beam_vector=ct.beam_vec, - sat_level=None, panel_buffer=None, style='yaml'): + def config_dict( + self, + chi=0, + tvec=ct.zeros_3, + beam_energy=beam_energy_DFLT, + beam_vector=ct.beam_vec, + sat_level=None, + panel_buffer=None, + style='yaml', + ): """ Return a dictionary of detector parameters. @@ -644,8 +677,9 @@ def config_dict(self, chi=0, tvec=ct.zeros_3, DESCRIPTION. 
""" - assert style.lower() in ['yaml', 'hdf5'], \ + assert style.lower() in ['yaml', 'hdf5'], ( "style must be either 'yaml', or 'hdf5'; you gave '%s'" % style + ) config_dict = {} @@ -657,8 +691,11 @@ def config_dict(self, chi=0, tvec=ct.zeros_3, # assign local vars; listify if necessary tilt = self.tilt translation = self.tvec - roi = None if self.roi is None \ + roi = ( + None + if self.roi is None else np.array([self.roi[0][0], self.roi[1][0]]).flatten() + ) if style.lower() == 'yaml': tilt = tilt.tolist() translation = translation.tolist() @@ -674,9 +711,8 @@ def config_dict(self, chi=0, tvec=ct.zeros_3, pixels=dict( rows=int(self.rows), columns=int(self.cols), - size=[float(self.pixel_size_row), - float(self.pixel_size_col)], - ) + size=[float(self.pixel_size_row), float(self.pixel_size_col)], + ), ) if roi is not None: @@ -693,8 +729,7 @@ def config_dict(self, chi=0, tvec=ct.zeros_3, if style.lower() == 'yaml': dparams = dparams.tolist() dist_d = dict( - function_name=self.distortion.maptype, - parameters=dparams + function_name=self.distortion.maptype, parameters=dparams ) det_dict['distortion'] = dist_d @@ -710,8 +745,7 @@ def config_dict(self, chi=0, tvec=ct.zeros_3, # !!! now we have to do some style-dependent munging of panel_buffer if isinstance(panel_buffer, np.ndarray): if panel_buffer.ndim == 1: - assert len(panel_buffer) == 2, \ - "length of 1-d buffer must be 2" + assert len(panel_buffer) == 2, "length of 1-d buffer must be 2" # if here is a 2-element array if style.lower() == 'yaml': panel_buffer = panel_buffer.tolist() @@ -720,7 +754,7 @@ def config_dict(self, chi=0, tvec=ct.zeros_3, # !!! can't practically write array-like buffers to YAML # so forced to clobber print("clobbering panel buffer array in yaml-ready output") - panel_buffer = [0., 0.] 
+ panel_buffer = [0.0, 0.0] else: raise RuntimeError( "panel buffer ndim must be 1 or 2; you specified %d" @@ -742,10 +776,7 @@ def config_dict(self, chi=0, tvec=ct.zeros_3, # ===================================================================== # SAMPLE STAGE PARAMETERS # ===================================================================== - stage_dict = dict( - chi=chi, - translation=tvec - ) + stage_dict = dict(chi=chi, translation=tvec) # ===================================================================== # BEAM PARAMETERS @@ -759,10 +790,7 @@ def config_dict(self, chi=0, tvec=ct.zeros_3, # polar_angle=pola # ) # ) - beam_dict = dict( - energy=beam_energy, - vector=beam_vector - ) + beam_dict = dict(energy=beam_energy, vector=beam_vector) config_dict['detector'] = det_dict config_dict['oscillation_stage'] = stage_dict @@ -821,10 +849,10 @@ def pixelToCart(self, ij_det): """ ij_det = np.atleast_2d(ij_det) - x = (ij_det[:, 1] + 0.5)*self.pixel_size_col\ - + self.corner_ll[0] - y = (self.rows - ij_det[:, 0] - 0.5)*self.pixel_size_row\ - + self.corner_ll[1] + x = (ij_det[:, 1] + 0.5) * self.pixel_size_col + self.corner_ll[0] + y = ( + self.rows - ij_det[:, 0] - 0.5 + ) * self.pixel_size_row + self.corner_ll[1] return np.vstack([x, y]).T def angularPixelSize(self, xy, rMat_s=None, tVec_s=None, tVec_c=None): @@ -862,11 +890,17 @@ def angularPixelSize(self, xy, rMat_s=None, tVec_s=None, tVec_c=None): ''' # call xrdutil function ang_ps = xrdutil.angularPixelSize( - xy, (self.pixel_size_row, self.pixel_size_col), - self.rmat, rMat_s, - self.tvec, tVec_s, tVec_c, + xy, + (self.pixel_size_row, self.pixel_size_col), + self.rmat, + rMat_s, + self.tvec, + tVec_s, + tVec_c, distortion=self.distortion, - beamVec=self.bvec, etaVec=self.evec) + beamVec=self.bvec, + etaVec=self.evec, + ) return ang_ps def clip_to_panel(self, xy, buffer_edges=True): @@ -891,8 +925,8 @@ def clip_to_panel(self, xy, buffer_edges=True): on_panel = np.logical_and(on_panel_rows, on_panel_cols) else: ''' - xlim = 0.5*self.col_dim - ylim = 0.5*self.row_dim + xlim = 0.5 * self.col_dim + ylim = 0.5 * self.row_dim if buffer_edges and self.panel_buffer is not None: if self.panel_buffer.ndim == 2: pix = self.cartToPixel(xy, pixels=True) @@ -904,7 +938,9 @@ def clip_to_panel(self, xy, buffer_edges=True): on_panel = np.full(pix.shape[0], False) valid_pix = pix[~idx, :] - on_panel[~idx] = self.panel_buffer[valid_pix[:, 0], valid_pix[:, 1]] + on_panel[~idx] = self.panel_buffer[ + valid_pix[:, 0], valid_pix[:, 1] + ] else: xlim -= self.panel_buffer[0] ylim -= self.panel_buffer[1] @@ -916,12 +952,8 @@ def clip_to_panel(self, xy, buffer_edges=True): ) on_panel = np.logical_and(on_panel_x, on_panel_y) elif not buffer_edges or self.panel_buffer is None: - on_panel_x = np.logical_and( - xy[:, 0] >= -xlim, xy[:, 0] <= xlim - ) - on_panel_y = np.logical_and( - xy[:, 1] >= -ylim, xy[:, 1] <= ylim - ) + on_panel_x = np.logical_and(xy[:, 0] >= -xlim, xy[:, 0] <= xlim) + on_panel_y = np.logical_and(xy[:, 1] >= -ylim, xy[:, 1] <= ylim) on_panel = np.logical_and(on_panel_x, on_panel_y) return xy[on_panel, :], on_panel @@ -932,13 +964,16 @@ def interpolate_nearest(self, xy, img, pad_with_nans=True): """ is_2d = img.ndim == 2 right_shape = img.shape[0] == self.rows and img.shape[1] == self.cols - assert is_2d and right_shape,\ - "input image must be 2-d with shape (%d, %d)"\ - % (self.rows, self.cols) + assert ( + is_2d and right_shape + ), "input image must be 2-d with shape (%d, %d)" % ( + self.rows, + self.cols, + ) # initialize output with nans if 
pad_with_nans:
-            int_xy = np.nan*np.ones(len(xy))
+            int_xy = np.nan * np.ones(len(xy))
         else:
             int_xy = np.zeros(len(xy))
 
@@ -954,7 +989,6 @@ def interpolate_nearest(self, xy, img, pad_with_nans=True):
             int_xy[on_panel] = int_vals
         return int_xy
 
-
     def interpolate_bilinear(self, xy, img, pad_with_nans=True):
         """
         Interpolate an image array at the specified cartesian points.
 
@@ -983,13 +1017,16 @@ def interpolate_bilinear(self, xy, img, pad_with_nans=True):
 
         is_2d = img.ndim == 2
         right_shape = img.shape[0] == self.rows and img.shape[1] == self.cols
-        assert is_2d and right_shape,\
-            "input image must be 2-d with shape (%d, %d)"\
-            % (self.rows, self.cols)
+        assert (
+            is_2d and right_shape
+        ), "input image must be 2-d with shape (%d, %d)" % (
+            self.rows,
+            self.cols,
+        )
 
         # initialize output with nans
         if pad_with_nans:
-            int_xy = np.nan*np.ones(len(xy))
+            int_xy = np.nan * np.ones(len(xy))
         else:
             int_xy = np.zeros(len(xy))
 
@@ -1016,25 +1053,34 @@ def interpolate_bilinear(self, xy, img, pad_with_nans=True):
         j_ceil_img = _fix_indices(j_ceil, 0, self.cols - 1)
 
         # first interpolate at top/bottom rows
-        row_floor_int = \
-            (j_ceil - ij_frac[:, 1])*img[i_floor_img, j_floor_img] \
-            + (ij_frac[:, 1] - j_floor)*img[i_floor_img, j_ceil_img]
-        row_ceil_int = \
-            (j_ceil - ij_frac[:, 1])*img[i_ceil_img, j_floor_img] \
-            + (ij_frac[:, 1] - j_floor)*img[i_ceil_img, j_ceil_img]
+        row_floor_int = (j_ceil - ij_frac[:, 1]) * img[
+            i_floor_img, j_floor_img
+        ] + (ij_frac[:, 1] - j_floor) * img[i_floor_img, j_ceil_img]
+        row_ceil_int = (j_ceil - ij_frac[:, 1]) * img[
+            i_ceil_img, j_floor_img
+        ] + (ij_frac[:, 1] - j_floor) * img[i_ceil_img, j_ceil_img]
 
         # next interpolate across cols
-        int_vals = \
-            (i_ceil - ij_frac[:, 0])*row_floor_int \
-            + (ij_frac[:, 0] - i_floor)*row_ceil_int
+        int_vals = (i_ceil - ij_frac[:, 0]) * row_floor_int + (
+            ij_frac[:, 0] - i_floor
+        ) * row_ceil_int
         int_xy[on_panel] = int_vals
         return int_xy
 
     def make_powder_rings(
-            self, pd, merge_hkls=False, delta_tth=None,
-            delta_eta=10., eta_period=None, eta_list=None,
-            rmat_s=ct.identity_3x3, tvec_s=ct.zeros_3,
-            tvec_c=ct.zeros_3, full_output=False, tth_distortion=None):
+        self,
+        pd,
+        merge_hkls=False,
+        delta_tth=None,
+        delta_eta=10.0,
+        eta_period=None,
+        eta_list=None,
+        rmat_s=ct.identity_3x3,
+        tvec_s=ct.zeros_3,
+        tvec_c=ct.zeros_3,
+        full_output=False,
+        tth_distortion=None,
+    ):
         """
         Generate points on Debye-Scherrer rings over the detector.
 
@@ -1079,8 +1125,9 @@ def make_powder_rings(
         """
         if tth_distortion is not None:
             tnorms = rowNorm(np.vstack([tvec_s, tvec_c]))
-            assert np.all(tnorms) < ct.sqrt_epsf, \
-                "If using distortion function, translations must be zero"
+            assert (
+                np.all(tnorms) < ct.sqrt_epsf
+            ), "If using distortion function, translations must be zero"
 
         # in case you want to give it tth angles directly
         if isinstance(pd, PlaneData):
@@ -1109,28 +1156,50 @@ def make_powder_rings(
             tth = pd.getTTh()
             tth_pm = tth_ranges - np.tile(tth, (2, 1)).T
             sector_vertices = np.vstack(
-                [[i[0], -del_eta,
-                  i[0], del_eta,
-                  i[1], del_eta,
-                  i[1], -del_eta,
-                  0.0, 0.0]
-                 for i in tth_pm])
+                [
+                    [
+                        i[0],
+                        -del_eta,
+                        i[0],
+                        del_eta,
+                        i[1],
+                        del_eta,
+                        i[1],
+                        -del_eta,
+                        0.0,
+                        0.0,
+                    ]
+                    for i in tth_pm
+                ]
+            )
         else:
             # Okay, we have an array-like tth specification
             tth = np.array(pd).flatten()
             if delta_tth is None:
                 raise RuntimeError(
                     "If supplying a 2theta list as first arg, "
-                    + "must supply a delta_tth")
-            tth_pm = 0.5*delta_tth*np.r_[-1., 1.]
+ + "must supply a delta_tth" + ) + tth_pm = 0.5 * delta_tth * np.r_[-1.0, 1.0] tth_ranges = np.radians([i + tth_pm for i in tth]) # !!! units sector_vertices = np.tile( - 0.5*np.radians([-delta_tth, -delta_eta, - -delta_tth, delta_eta, - delta_tth, delta_eta, - delta_tth, -delta_eta, - 0.0, 0.0]), (len(tth), 1) - ) + 0.5 + * np.radians( + [ + -delta_tth, + -delta_eta, + -delta_tth, + delta_eta, + delta_tth, + delta_eta, + delta_tth, + -delta_eta, + 0.0, + 0.0, + ] + ), + (len(tth), 1), + ) # !! conversions, meh... tth = np.radians(tth) del_eta = np.radians(delta_eta) @@ -1140,13 +1209,12 @@ def make_powder_rings( eta_period = (-np.pi, np.pi) if eta_list is None: - neta = int(360./float(delta_eta)) + neta = int(360.0 / float(delta_eta)) # this is the vector of ETA EDGES eta_edges = mapAngle( - np.radians( - delta_eta*np.linspace(0., neta, num=neta + 1) - ) + eta_period[0], - eta_period + np.radians(delta_eta * np.linspace(0.0, neta, num=neta + 1)) + + eta_period[0], + eta_period, ) # get eta bin centers from edges @@ -1157,13 +1225,13 @@ def make_powder_rings( axis=0) """ # !!! should be safe as eta_edges are monotonic - eta_centers = eta_edges[:-1] + 0.5*del_eta + eta_centers = eta_edges[:-1] + 0.5 * del_eta else: eta_centers = np.radians(eta_list).flatten() neta = len(eta_centers) eta_edges = ( - np.tile(eta_centers, (2, 1)) + - np.tile(0.5*del_eta*np.r_[-1, 1], (neta, 1)).T + np.tile(eta_centers, (2, 1)) + + np.tile(0.5 * del_eta * np.r_[-1, 1], (neta, 1)).T ).T.flatten() # get chi and ome from rmat_s @@ -1173,8 +1241,10 @@ def make_powder_rings( ome = np.arctan2(rmat_s[0, 2], rmat_s[0, 0]) # make list of angle tuples - angs = [np.vstack([i*np.ones(neta), eta_centers, ome*np.ones(neta)]) - for i in tth] + angs = [ + np.vstack([i * np.ones(neta), eta_centers, ome * np.ones(neta)]) + for i in tth + ] # need xy coords and pixel sizes valid_ang = [] @@ -1191,15 +1261,18 @@ def make_powder_rings( patch_vertices = ( np.tile(these_angs[:, :2], (1, npp)) + np.tile(sector_vertices[i_ring], (neta, 1)) - ).reshape(npp*neta, 2) + ).reshape(npp * neta, 2) # find vertices that all fall on the panel # !!! 
not API ambiguity regarding rmat_s above all_xy = self.angles_to_cart( patch_vertices, - rmat_s=rmat_s, tvec_s=tvec_s, - rmat_c=None, tvec_c=tvec_c, - apply_distortion=True) + rmat_s=rmat_s, + tvec_s=tvec_s, + rmat_c=None, + tvec_c=tvec_c, + apply_distortion=True, + ) _, on_panel = self.clip_to_panel(all_xy) @@ -1214,10 +1287,11 @@ def make_powder_rings( if tth_distortion is not None: patch_valid_angs = tth_distortion.apply( self.angles_to_cart(these_angs[patch_is_on, :2]), - return_nominal=True + return_nominal=True, + ) + patch_valid_xys = self.angles_to_cart( + patch_valid_angs, apply_distortion=True ) - patch_valid_xys = self.angles_to_cart(patch_valid_angs, - apply_distortion=True) else: patch_valid_angs = these_angs[patch_is_on, :2] patch_valid_xys = patch_xys[:, -1, :].squeeze() @@ -1274,20 +1348,30 @@ def map_to_plane(self, pts, rmat, tvec): pts_lab = np.dot(self.rmat, pts_det.T) + tvec_d_lab # scaling along pts vectors to hit map plane - u = np.dot(nvec_map_lab.T, tvec_map_lab) \ - / np.dot(nvec_map_lab.T, pts_lab) + u = np.dot(nvec_map_lab.T, tvec_map_lab) / np.dot( + nvec_map_lab.T, pts_lab + ) # pts on map plane, in LAB FRAME pts_map_lab = np.tile(u, (3, 1)) * pts_lab return np.dot(rmat.T, pts_map_lab - tvec_map_lab)[:2, :].T - def simulate_rotation_series(self, plane_data, grain_param_list, - eta_ranges=[(-np.pi, np.pi), ], - ome_ranges=[(-np.pi, np.pi), ], - ome_period=(-np.pi, np.pi), - chi=0., tVec_s=ct.zeros_3, - wavelength=None): + def simulate_rotation_series( + self, + plane_data, + grain_param_list, + eta_ranges=[ + (-np.pi, np.pi), + ], + ome_ranges=[ + (-np.pi, np.pi), + ], + ome_period=(-np.pi, np.pi), + chi=0.0, + tVec_s=ct.zeros_3, + wavelength=None, + ): """ Simulate a monochromatic rotation series for a list of grains. @@ -1335,8 +1419,9 @@ def simulate_rotation_series(self, plane_data, grain_param_list, else: if plane_data.wavelength != wavelength: plane_data.wavelength = ct.keVToAngstrom(wavelength) - assert not np.any(np.isnan(plane_data.getTTh())),\ - "plane data exclusions incompatible with wavelength" + assert not np.any( + np.isnan(plane_data.getTTh()) + ), "plane data exclusions incompatible with wavelength" # vstacked G-vector id, h, k, l full_hkls = xrdutil._fetch_hkls_from_planedata(plane_data) @@ -1358,25 +1443,33 @@ def simulate_rotation_series(self, plane_data, grain_param_list, # for each omega solution angList = np.vstack( oscillAnglesOfHKLs( - full_hkls[:, 1:], chi, - rMat_c, bMat, wavelength, + full_hkls[:, 1:], + chi, + rMat_c, + bMat, + wavelength, vInv=vInv_s, - ) ) + ) # filter by eta and omega ranges # ??? get eta range from detector? 
allAngs, allHKLs = xrdutil._filter_hkls_eta_ome( full_hkls, angList, eta_ranges, ome_ranges - ) + ) allAngs[:, 2] = mapAngle(allAngs[:, 2], ome_period) # find points that fall on the panel det_xy, rMat_s, on_plane = xrdutil._project_on_detector_plane( allAngs, - self.rmat, rMat_c, chi, - self.tvec, tVec_c, tVec_s, - self.distortion) + self.rmat, + rMat_c, + chi, + self.tvec, + tVec_c, + tVec_s, + self.distortion, + ) xys_p, on_panel = self.clip_to_panel(det_xy) valid_xys.append(xys_p) @@ -1398,13 +1491,17 @@ def simulate_rotation_series(self, plane_data, grain_param_list, ang_pixel_size.append(self.angularPixelSize(xys_p)) return valid_ids, valid_hkls, valid_angs, valid_xys, ang_pixel_size - def simulate_laue_pattern(self, crystal_data, - minEnergy=5., maxEnergy=35., - rmat_s=None, tvec_s=None, - grain_params=None, - beam_vec=None): - """ - """ + def simulate_laue_pattern( + self, + crystal_data, + minEnergy=5.0, + maxEnergy=35.0, + rmat_s=None, + tvec_s=None, + grain_params=None, + beam_vec=None, + ): + """ """ if isinstance(crystal_data, PlaneData): plane_data = crystal_data @@ -1436,8 +1533,9 @@ def simulate_laue_pattern(self, crystal_data, # TODO: allow for spectrum parsing multipleEnergyRanges = False if hasattr(maxEnergy, '__len__'): - assert len(maxEnergy) == len(minEnergy), \ - 'energy cutoff ranges must have the same length' + assert len(maxEnergy) == len( + minEnergy + ), 'energy cutoff ranges must have the same length' multipleEnergyRanges = True lmin = [] lmax = [] @@ -1472,11 +1570,11 @@ def simulate_laue_pattern(self, crystal_data, # ========================================================================= # pre-allocate output arrays - xy_det = np.nan*np.ones((n_grains, nhkls_tot, 2)) - hkls_in = np.nan*np.ones((n_grains, 3, nhkls_tot)) - angles = np.nan*np.ones((n_grains, nhkls_tot, 2)) - dspacing = np.nan*np.ones((n_grains, nhkls_tot)) - energy = np.nan*np.ones((n_grains, nhkls_tot)) + xy_det = np.nan * np.ones((n_grains, nhkls_tot, 2)) + hkls_in = np.nan * np.ones((n_grains, 3, nhkls_tot)) + angles = np.nan * np.ones((n_grains, nhkls_tot, 2)) + dspacing = np.nan * np.ones((n_grains, nhkls_tot)) + energy = np.nan * np.ones((n_grains, nhkls_tot)) for iG, gp in enumerate(grain_params): rmat_c = makeRotMatOfExpMap(gp[:3]) tvec_c = gp[3:6].reshape(3, 1) @@ -1487,10 +1585,16 @@ def simulate_laue_pattern(self, crystal_data, ghat_c_str = mutil.unitVector(np.dot(rmat_c.T, gvec_s_str)) # project - dpts = gvec_to_xy(ghat_c_str.T, - self.rmat, rmat_s, rmat_c, - self.tvec, tvec_s, tvec_c, - beam_vec=beam_vec) + dpts = gvec_to_xy( + ghat_c_str.T, + self.rmat, + rmat_s, + rmat_c, + self.tvec, + tvec_s, + tvec_c, + beam_vec=beam_vec, + ) # check intersections with detector plane canIntersect = ~np.isnan(dpts[:, 0]) @@ -1503,9 +1607,13 @@ def simulate_laue_pattern(self, crystal_data, # back to angles tth_eta, gvec_l = detectorXYToGvec( dpts, - self.rmat, rmat_s, - self.tvec, tvec_s, tvec_c, - beamVec=beam_vec) + self.rmat, + rmat_s, + self.tvec, + tvec_s, + tvec_c, + beamVec=beam_vec, + ) tth_eta = np.vstack(tth_eta).T # warp measured points @@ -1513,8 +1621,8 @@ def simulate_laue_pattern(self, crystal_data, dpts = self.distortion.apply_inverse(dpts) # plane spacings and energies - dsp = 1. 
/ rowNorm(gvec_s_str[:, canIntersect].T) - wlen = 2*dsp*np.sin(0.5*tth_eta[:, 0]) + dsp = 1.0 / rowNorm(gvec_s_str[:, canIntersect].T) + wlen = 2 * dsp * np.sin(0.5 * tth_eta[:, 0]) # clip to detector panel _, on_panel = self.clip_to_panel(dpts, buffer_edges=True) @@ -1523,8 +1631,8 @@ def simulate_laue_pattern(self, crystal_data, validEnergy = np.zeros(len(wlen), dtype=bool) for i in range(len(lmin)): in_energy_range = np.logical_and( - wlen >= lmin[i], - wlen <= lmax[i]) + wlen >= lmin[i], wlen <= lmax[i] + ) validEnergy = validEnergy | in_energy_range pass else: @@ -1540,8 +1648,8 @@ def simulate_laue_pattern(self, crystal_data, angles[iG][keepers, :] = tth_eta[keepers, :] dspacing[iG, keepers] = dsp[keepers] energy[iG, keepers] = ct.keVToAngstrom(wlen[keepers]) - pass # close conditional on valids - pass # close loop on grains + pass # close conditional on valids + pass # close loop on grains return xy_det, hkls_in, angles, dspacing, energy @staticmethod @@ -1566,6 +1674,7 @@ def increase_memoization_sizes(funcs, min_size): # UTILITY METHODS # ============================================================================= + def _fix_indices(idx, lo, hi): nidx = np.array(idx) off_lo = nidx < lo @@ -1576,24 +1685,26 @@ def _fix_indices(idx, lo, hi): def _row_edge_vec(rows, pixel_size_row): - return pixel_size_row*(0.5*rows-np.arange(rows+1)) + return pixel_size_row * (0.5 * rows - np.arange(rows + 1)) def _col_edge_vec(cols, pixel_size_col): - return pixel_size_col*(np.arange(cols+1)-0.5*cols) + return pixel_size_col * (np.arange(cols + 1) - 0.5 * cols) # FIXME find a better place for this, and maybe include loop over pixels - @numba.njit(nogil=True, cache=True) def _solid_angle_of_triangle(vtx_list): - norms = np.sqrt(np.sum(vtx_list*vtx_list, axis=1)) + norms = np.sqrt(np.sum(vtx_list * vtx_list, axis=1)) norms_prod = norms[0] * norms[1] * norms[2] - scalar_triple_product = np.dot(vtx_list[0], - np.cross(vtx_list[2], vtx_list[1])) - denominator = norms_prod \ - + norms[0]*np.dot(vtx_list[1], vtx_list[2]) \ - + norms[1]*np.dot(vtx_list[2], vtx_list[0]) \ - + norms[2]*np.dot(vtx_list[0], vtx_list[1]) - - return 2.*np.arctan2(scalar_triple_product, denominator) \ No newline at end of file + scalar_triple_product = np.dot( + vtx_list[0], np.cross(vtx_list[2], vtx_list[1]) + ) + denominator = ( + norms_prod + + norms[0] * np.dot(vtx_list[1], vtx_list[2]) + + norms[1] * np.dot(vtx_list[2], vtx_list[0]) + + norms[2] * np.dot(vtx_list[0], vtx_list[1]) + ) + + return 2.0 * np.arctan2(scalar_triple_product, denominator) diff --git a/hexrd/sampleOrientations/conversions.py b/hexrd/sampleOrientations/conversions.py index 7d840bc16..2694fe6cc 100644 --- a/hexrd/sampleOrientations/conversions.py +++ b/hexrd/sampleOrientations/conversions.py @@ -1,13 +1,12 @@ import numpy as np from numba import njit -import numba -from numba import prange from hexrd import constants ap_2 = constants.cuA_2 sc = constants.sc -@numba.njit(cache=True, nogil=True) + +@njit(cache=True, nogil=True) def getPyramid(xyz): x = xyz[0] y = xyz[1] @@ -36,6 +35,7 @@ def cu2ro(cu): ho = cu2ho(cu) return ho2ro(ho) + @njit(cache=True, nogil=True) def cu2ho(cu): ma = np.max(np.abs(cu)) @@ -87,11 +87,13 @@ def cu2ho(cu): elif pyd == 5 or pyd == 6: return np.array([LamXYZ[1], LamXYZ[2], LamXYZ[0]]) + @njit(cache=True, nogil=True) def ho2ro(ho): ax = ho2ax(ho) return ax2ro(ax) + @njit(cache=True, nogil=True) def ho2ax(ho): hmag = np.linalg.norm(ho[:])**2 @@ -110,6 +112,7 @@ def ho2ax(ho): else: return np.array([hn[0], hn[1], hn[2], s]) + 
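The helpers in this module chain together (cubochoric -> homochoric -> axis-angle -> Rodrigues -> quaternion). A small end-to-end sketch using cu2ro above and ro2qu just below, with a made-up input point (requires hexrd's compiled constants):

    import numpy as np
    from hexrd.sampleOrientations.conversions import cu2ro, ro2qu

    cu = np.array([0.3, -0.2, 0.5])   # a point inside the cubochoric cell
    ro = cu2ro(cu)                    # Rodrigues: [nx, ny, nz, tan(omega/2)]
    qu = ro2qu(ro)                    # unit quaternion [q0, q1, q2, q3]
    print(np.linalg.norm(qu))         # ~1.0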
@njit(cache=True, nogil=True) def ax2ro(ax): if np.abs(ax[3]) < 1E-8: @@ -121,11 +124,13 @@ def ax2ro(ax): else: return np.array([ax[0], ax[1], ax[2], np.tan(ax[3]*0.5)]) + @njit(cache=True, nogil=True) def ro2qu(ro): ax = ro2ax(ro) return ax2qu(ax) + @njit(cache=True, nogil=True) def ro2ax(ro): if np.abs(ro[3]) < 1E-8: diff --git a/hexrd/sampleOrientations/rfz.py b/hexrd/sampleOrientations/rfz.py index bc0344eac..36556ca4e 100644 --- a/hexrd/sampleOrientations/rfz.py +++ b/hexrd/sampleOrientations/rfz.py @@ -1,16 +1,17 @@ import numpy as np import numba -from numba import prange from hexrd.constants import FZtypeArray, FZorderArray from hexrd import constants + @numba.njit(cache=True, nogil=True) def getFZtypeandOrder(pgnum): FZtype = FZtypeArray[pgnum-1] FZorder = FZorderArray[pgnum-1] return np.array([FZtype, FZorder]) + @numba.njit(cache=True, nogil=True) def insideCyclicFZ(ro, FZorder): res = False @@ -29,6 +30,7 @@ def insideCyclicFZ(ro, FZorder): return res + @numba.njit(cache=True, nogil=True) def insideDihedralFZ(ro, FZorder): if np.abs(ro[3]) >= np.sqrt(3.0): @@ -75,6 +77,7 @@ def insideDihedralFZ(ro, FZorder): else: return False + @numba.njit(cache=True, nogil=True) def insideCubicFZ(ro, kwrd): rod = np.abs(ro[0:3] * ro[3]) @@ -88,6 +91,7 @@ def insideCubicFZ(ro, kwrd): res = np.logical_and(c1, c2) return res + @numba.njit(cache=True, nogil=True) def insideFZ(ro, pgnum): res = getFZtypeandOrder(pgnum) diff --git a/hexrd/transforms/xf.py b/hexrd/transforms/xf.py index e233e89b7..1d766fc20 100644 --- a/hexrd/transforms/xf.py +++ b/hexrd/transforms/xf.py @@ -29,6 +29,7 @@ import sys import numpy as np import numba + # np.seterr(invalid='ignore') import scipy.sparse as sparse @@ -40,19 +41,19 @@ # Module Data # ============================================================================= -epsf = np.finfo(float).eps # ~2.2e-16 -ten_epsf = 10 * epsf # ~2.2e-15 -sqrt_epsf = np.sqrt(epsf) # ~1.5e-8 +epsf = np.finfo(float).eps # ~2.2e-16 +ten_epsf = 10 * epsf # ~2.2e-15 +sqrt_epsf = np.sqrt(epsf) # ~1.5e-8 -periodDict = {'degrees': 360.0, 'radians': 2*np.pi} -angularUnits = 'radians' # module-level angle units -d2r = np.pi/180.0 +periodDict = {'degrees': 360.0, 'radians': 2 * np.pi} +angularUnits = 'radians' # module-level angle units +d2r = np.pi / 180.0 # basis vectors -I3 = np.eye(3) # (3, 3) identity -Xl = np.array([[1., 0., 0.]], order='C').T # X in the lab frame -Yl = np.array([[0., 1., 0.]], order='C').T # Y in the lab frame -Zl = np.array([[0., 0., 1.]], order='C').T # Z in the lab frame +I3 = np.eye(3) # (3, 3) identity +Xl = np.array([[1.0, 0.0, 0.0]], order='C').T # X in the lab frame +Yl = np.array([[0.0, 1.0, 0.0]], order='C').T # Y in the lab frame +Zl = np.array([[0.0, 0.0, 1.0]], order='C').T # Z in the lab frame zeroVec = np.zeros(3, order='C') @@ -61,7 +62,7 @@ eta_ref = Xl # reference stretch -vInv_ref = np.array([[1., 1., 1., 0., 0., 0.]], order='C').T +vInv_ref = np.array([[1.0, 1.0, 1.0, 0.0, 0.0, 0.0]], order='C').T # ============================================================================= @@ -99,14 +100,15 @@ def _anglesToGVecHelper(angs, out): # [np.sin(0.5*angs[:, 0])]]) n = angs.shape[0] for i in range(n): - ca0 = np.cos(0.5*angs[i, 0]) - sa0 = np.sin(0.5*angs[i, 0]) + ca0 = np.cos(0.5 * angs[i, 0]) + sa0 = np.sin(0.5 * angs[i, 0]) ca1 = np.cos(angs[i, 1]) sa1 = np.sin(angs[i, 1]) out[i, 0] = ca0 * ca1 out[i, 1] = ca0 * sa1 out[i, 2] = sa0 + def anglesToGVec(angs, bHat_l, eHat_l, rMat_s=I3, rMat_c=I3): """ from 'eta' frame out to lab @@ -119,10 +121,9 @@ 
def anglesToGVec(angs, bHat_l, eHat_l, rMat_s=I3, rMat_c=I3): return np.dot(mat, gVec_e.T) -def gvecToDetectorXY(gVec_c, - rMat_d, rMat_s, rMat_c, - tVec_d, tVec_s, tVec_c, - beamVec=bVec_ref): +def gvecToDetectorXY( + gVec_c, rMat_d, rMat_s, rMat_c, tVec_d, tVec_s, tVec_c, beamVec=bVec_ref +): """ Takes a list of unit reciprocal lattice vectors in crystal frame to the specified detector-relative frame. @@ -161,10 +162,10 @@ def gvecToDetectorXY(gVec_c, """ ztol = epsf - nVec_l = np.dot(rMat_d, Zl) # detector plane normal + nVec_l = np.dot(rMat_d, Zl) # detector plane normal bHat_l = unitVector(beamVec.reshape(3, 1)) # make sure beam vector is unit - P0_l = tVec_s + np.dot(rMat_s, tVec_c) # origin of CRYSTAL FRAME - P3_l = tVec_d # origin of DETECTOR FRAME + P0_l = tVec_s + np.dot(rMat_s, tVec_c) # origin of CRYSTAL FRAME + P3_l = tVec_d # origin of DETECTOR FRAME # form unit reciprocal lattice vectors in lab frame (w/o translation) gVec_l = np.dot(rMat_s, np.dot(rMat_c, unitVector(gVec_c))) @@ -174,7 +175,7 @@ def gvecToDetectorXY(gVec_c, # see who can diffract; initialize output array with NaNs canDiffract = np.atleast_1d( - np.logical_and(bDot >= ztol, bDot <= 1. - ztol) + np.logical_and(bDot >= ztol, bDot <= 1.0 - ztol) ) npts = sum(canDiffract) retval = np.nan * np.ones_like(gVec_l) @@ -195,8 +196,8 @@ def gvecToDetectorXY(gVec_c, # first check for non-instersections denom = np.dot(nVec_l.T, dVec_l).flatten() dzero = abs(denom) < ztol - denom[dzero] = 1. # mitigate divide-by-zero - cantIntersect = denom > 0. # index to dVec_l that can't hit det + denom[dzero] = 1.0 # mitigate divide-by-zero + cantIntersect = denom > 0.0 # index to dVec_l that can't hit det # displacement scaling (along dVec_l) u = np.dot(nVec_l.T, P3_l - P0_l).flatten() / denom @@ -213,12 +214,18 @@ def gvecToDetectorXY(gVec_c, return retval[:2, :].T -def detectorXYToGvec(xy_det, - rMat_d, rMat_s, - tVec_d, tVec_s, tVec_c, - distortion=None, - beamVec=bVec_ref, etaVec=eta_ref, - output_ref=False): +def detectorXYToGvec( + xy_det, + rMat_d, + rMat_s, + tVec_d, + tVec_s, + tVec_c, + distortion=None, + beamVec=bVec_ref, + etaVec=eta_ref, + output_ref=False, +): """ Takes a list cartesian (x, y) pairs in the detector coordinates and calculates the associated reciprocal lattice (G) vectors and @@ -278,7 +285,7 @@ def detectorXYToGvec(xy_det, # in LAB FRAME P2_l = np.dot(rMat_d, P2_d) + tVec_d - P0_l = tVec_s + np.dot(rMat_s, tVec_c) # origin of CRYSTAL FRAME + P0_l = tVec_s + np.dot(rMat_s, tVec_c) # origin of CRYSTAL FRAME # diffraction unit vector components in LAB FRAME dHat_l = unitVector(P2_l - P0_l) @@ -287,8 +294,9 @@ def detectorXYToGvec(xy_det, # generate output # DEBUGGING - assert abs(np.dot(bHat_l.T, eHat_l)) < 1. - sqrt_epsf, \ - "eta ref and beam cannot be parallel!" + assert ( + abs(np.dot(bHat_l.T, eHat_l)) < 1.0 - sqrt_epsf + ), "eta ref and beam cannot be parallel!" 
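The rotation built next by makeEtaFrameRotMat takes Ze along the negated beam and completes a right-handed triad from the eta reference. A standalone numpy sketch of the same construction, using hexrd's default beam (-z) and eta reference (+x); illustrative, not hexrd API:

    import numpy as np

    def eta_frame(bhat, ehat):
        ze = -bhat / np.linalg.norm(bhat)   # Ze: opposite the beam
        ye = np.cross(ze, ehat)             # Ye = Ze x eta_ref
        ye /= np.linalg.norm(ye)
        xe = np.cross(ye, ze)               # Xe completes the triad
        return np.column_stack([xe, ye, ze])

    print(eta_frame(np.r_[0.0, 0.0, -1.0], np.r_[1.0, 0.0, 0.0]))
    # -> identity matrix for the default beam and eta reference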
     rMat_e = makeEtaFrameRotMat(bHat_l, eHat_l)
     dHat_e = np.dot(rMat_e.T, dHat_l)
@@ -313,8 +321,16 @@ def detectorXYToGvec(xy_det,
     return (tTh, eta), gVec_l
 
 
-def oscillAnglesOfHKLs(hkls, chi, rMat_c, bMat, wavelength,
-                       vInv=vInv_ref, beamVec=bVec_ref, etaVec=eta_ref):
+def oscillAnglesOfHKLs(
+    hkls,
+    chi,
+    rMat_c,
+    bMat,
+    wavelength,
+    vInv=vInv_ref,
+    beamVec=bVec_ref,
+    etaVec=eta_ref,
+):
     """
 
 
@@ -411,20 +427,30 @@ def oscillAnglesOfHKLs(hkls, chi, rMat_c, bMat, wavelength,
     schi = np.sin(chi)
 
     # coefficients for harmonic equation
-    a = gHat_s[2, :]*bHat_l[0] \
-        + schi*gHat_s[0, :]*bHat_l[1] - cchi*gHat_s[0, :]*bHat_l[2]
-    b = gHat_s[0, :]*bHat_l[0] \
-        - schi*gHat_s[2, :]*bHat_l[1] + cchi*gHat_s[2, :]*bHat_l[2]
-    c = -sintht - cchi*gHat_s[1, :]*bHat_l[1] - schi*gHat_s[1, :]*bHat_l[2]
+    a = (
+        gHat_s[2, :] * bHat_l[0]
+        + schi * gHat_s[0, :] * bHat_l[1]
+        - cchi * gHat_s[0, :] * bHat_l[2]
+    )
+    b = (
+        gHat_s[0, :] * bHat_l[0]
+        - schi * gHat_s[2, :] * bHat_l[1]
+        + cchi * gHat_s[2, :] * bHat_l[2]
+    )
+    c = (
+        -sintht
+        - cchi * gHat_s[1, :] * bHat_l[1]
+        - schi * gHat_s[1, :] * bHat_l[2]
+    )
 
     # should all be 1-d: a = a.flatten(); b = b.flatten(); c = c.flatten()
 
     # form solution
-    abMag = np.sqrt(a*a + b*b)
+    abMag = np.sqrt(a * a + b * b)
     assert np.all(abMag > 0), "Beam vector specification is infeasible!"
     phaseAng = np.arctan2(b, a)
     rhs = c / abMag
-    rhs[abs(rhs) > 1.] = np.nan
+    rhs[abs(rhs) > 1.0] = np.nan
     rhsAng = np.arcsin(rhs)  # will give NaN for abs(rhs) > 1. + 0.5*epsf
 
     # write ome angle output arrays (NaNs persist here)
@@ -434,11 +460,12 @@ def oscillAnglesOfHKLs(hkls, chi, rMat_c, bMat, wavelength,
 
     goodOnes_s = -np.isnan(ome0)
 
     # DEBUGGING
-    assert np.all(np.isnan(ome0) == np.isnan(ome1)), \
-        "infeasible hkls do not match for ome0, ome1!"
+    assert np.all(
+        np.isnan(ome0) == np.isnan(ome1)
+    ), "infeasible hkls do not match for ome0, ome1!"
 
     # do etas -- ONLY COMPUTE IN CASE CONSISTENT REFERENCE COORDINATES
-    if abs(np.dot(bHat_l.T, eHat_l)) < 1.
- sqrt_epsf and np.any(goodOnes_s): + if abs(np.dot(bHat_l.T, eHat_l)) < 1.0 - sqrt_epsf and np.any(goodOnes_s): eta0 = np.nan * np.ones_like(ome0) eta1 = np.nan * np.ones_like(ome1) @@ -456,34 +483,42 @@ def oscillAnglesOfHKLs(hkls, chi, rMat_c, bMat, wavelength, for i in range(numGood): rMat_s = makeOscillRotMat([chi, allome[goodOnes][i]]) gVec_e = np.dot( - rMat_e.T, np.dot( - rMat_s, np.dot( - rMat_c, tmp_gvec[:, i].reshape(3, 1) - ) - ) + rMat_e.T, + np.dot(rMat_s, np.dot(rMat_c, tmp_gvec[:, i].reshape(3, 1))), ) tmp_eta[i] = np.arctan2(gVec_e[1], gVec_e[0]) eta0[goodOnes_s] = tmp_eta[:numGood_s] eta1[goodOnes_s] = tmp_eta[numGood_s:] # make assoc tTh array - tTh = 2.*np.arcsin(sintht).flatten() + tTh = 2.0 * np.arcsin(sintht).flatten() tTh0 = tTh tTh0[-goodOnes_s] = np.nan - retval = (np.vstack([tTh0.flatten(), eta0.flatten(), ome0.flatten()]), - np.vstack([tTh0.flatten(), eta1.flatten(), ome1.flatten()]),) + retval = ( + np.vstack([tTh0.flatten(), eta0.flatten(), ome0.flatten()]), + np.vstack([tTh0.flatten(), eta1.flatten(), ome1.flatten()]), + ) else: retval = (ome0.flatten(), ome1.flatten()) return retval -def polarRebin(thisFrame, - npdiv=2, mmPerPixel=(0.2, 0.2), convertToTTh=False, - rMat_d=I3, tVec_d=np.r_[0., 0., -1000.], - beamVec=bVec_ref, etaVec=eta_ref, - rhoRange=np.r_[20, 200], numRho=1000, - etaRange=(d2r*np.r_[-5, 355]), - numEta=36, verbose=True, log=None): +def polarRebin( + thisFrame, + npdiv=2, + mmPerPixel=(0.2, 0.2), + convertToTTh=False, + rMat_d=I3, + tVec_d=np.r_[0.0, 0.0, -1000.0], + beamVec=bVec_ref, + etaVec=eta_ref, + rhoRange=np.r_[20, 200], + numRho=1000, + etaRange=(d2r * np.r_[-5, 355]), + numEta=36, + verbose=True, + log=None, +): """ Performs polar rebinning of an input image. @@ -558,7 +593,9 @@ def polarRebin(thisFrame, startRho = rhoRange[0] stopRho = rhoRange[1] - subPixArea = 1/float(npdiv)**2 # areal rescaling for subpixel intensities + subPixArea = ( + 1 / float(npdiv) ** 2 + ) # areal rescaling for subpixel intensities # MASTER COORDINATES # - in pixel indices, UPPER LEFT PIXEL is [0, 0] --> (row, col) @@ -570,17 +607,23 @@ def polarRebin(thisFrame, # need rhos (or tThs) and etas) if convertToTTh: - dAngs = detectorXYToGvec(np.vstack([x, y]).T, - rMat_d, I3, - tVec_d, zeroVec, zeroVec, - beamVec=beamVec, etaVec=etaVec) - rho = dAngs[0][0] # this is tTh now + dAngs = detectorXYToGvec( + np.vstack([x, y]).T, + rMat_d, + I3, + tVec_d, + zeroVec, + zeroVec, + beamVec=beamVec, + etaVec=etaVec, + ) + rho = dAngs[0][0] # this is tTh now eta = dAngs[0][1] else: # in here, we are vanilla cartesian - rho = np.sqrt(x*x + y*y) + rho = np.sqrt(x * x + y * y) eta = np.arctan2(y, x) - eta = mapAngle(eta, [startEta, 2*np.pi + startEta], units='radians') + eta = mapAngle(eta, [startEta, 2 * np.pi + startEta], units='radians') # MAKE POLAR BIN CENTER ARRAY deltaEta = (stopEta - startEta) / float(numEta) @@ -604,12 +647,12 @@ def polarRebin(thisFrame, print(msg) pass - rhoI = startRho - 10*deltaRho - rhoF = stopRho + 10*deltaRho + rhoI = startRho - 10 * deltaRho + rhoF = stopRho + 10 * deltaRho inAnnulus = np.where((rho >= rhoI) & (rho <= rhoF))[0] for i in range(numEta): if verbose: - msg = "INFO: Processing sector %d of %d\n" % (i+1, numEta) + msg = "INFO: Processing sector %d of %d\n" % (i + 1, numEta) if log: log.write(msg) else: @@ -617,8 +660,8 @@ def polarRebin(thisFrame, pass # import pdb;pdb.set_trace() - etaI1 = rowEta[i] - 10.5*deltaEta - etaF1 = rowEta[i] + 10.5*deltaEta + etaI1 = rowEta[i] - 10.5 * deltaEta + etaF1 = rowEta[i] + 10.5 * deltaEta 
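The rebinning step below splits every pixel into npdiv**2 subpixels carrying equal shares of the pixel, then bins those subpixels radially. A toy numpy version of that bookkeeping (bin edges and intensities are made up, and polarRebin's sparse-matrix normalization is reduced to a plain histogram):

    import numpy as np

    npdiv = 2
    rho_edges = np.linspace(0.0, 10.0, 6)   # 5 radial bins
    pix_rho = np.array([1.2, 4.8])          # pixel-center radii
    pix_int = np.array([100.0, 40.0])       # pixel intensities

    offsets = np.linspace(-0.05, 0.05, npdiv**2)   # subpixel center shifts
    sub_rho = (pix_rho[:, None] + offsets).ravel()
    sub_int = np.repeat(pix_int / npdiv**2, npdiv**2)

    hist, _ = np.histogram(sub_rho, bins=rho_edges, weights=sub_int)
    print(hist)  # -> [100.   0.  40.   0.   0.]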
tmpEta = eta[inAnnulus] inSector = np.where((tmpEta >= etaI1) & (tmpEta <= etaF1))[0] @@ -640,32 +683,42 @@ def polarRebin(thisFrame, intY = np.tile(intY.flatten(), (nptsIn, 1)).T.flatten() # expand coords using pixel subdivision - tmpX = np.tile(tmpX, (npdiv**2, 1)).flatten() \ - + (intX - 0.5)*mmPerPixel[0] - tmpY = np.tile(tmpY, (npdiv**2, 1)).flatten() \ - + (intY - 0.5)*mmPerPixel[1] + tmpX = ( + np.tile(tmpX, (npdiv**2, 1)).flatten() + + (intX - 0.5) * mmPerPixel[0] + ) + tmpY = ( + np.tile(tmpY, (npdiv**2, 1)).flatten() + + (intY - 0.5) * mmPerPixel[1] + ) tmpI = np.tile(tmpI, (npdiv**2, 1)).flatten() / subPixArea if convertToTTh: - dAngs = detectorXYToGvec(np.vstack([tmpX, tmpY]).T, - rMat_d, I3, - tVec_d, zeroVec, zeroVec, - beamVec=beamVec, etaVec=etaVec) - tmpRho = dAngs[0][0] # this is tTh now + dAngs = detectorXYToGvec( + np.vstack([tmpX, tmpY]).T, + rMat_d, + I3, + tVec_d, + zeroVec, + zeroVec, + beamVec=beamVec, + etaVec=etaVec, + ) + tmpRho = dAngs[0][0] # this is tTh now tmpEta = dAngs[0][1] else: - tmpRho = np.sqrt(tmpX*tmpX + tmpY*tmpY) + tmpRho = np.sqrt(tmpX * tmpX + tmpY * tmpY) tmpEta = np.arctan2(tmpY, tmpX) tmpEta = mapAngle( - tmpEta, [startEta, 2*np.pi + startEta], - units='radians' + tmpEta, [startEta, 2 * np.pi + startEta], units='radians' ) - etaI2 = rowEta[i] - 0.5*deltaEta - etaF2 = rowEta[i] + 0.5*deltaEta + etaI2 = rowEta[i] - 0.5 * deltaEta + etaF2 = rowEta[i] + 0.5 * deltaEta - inSector2 = ((tmpRho >= startRho) & (tmpRho <= stopRho)) \ - & ((tmpEta >= etaI2) & (tmpEta <= etaF2)) + inSector2 = ((tmpRho >= startRho) & (tmpRho <= stopRho)) & ( + (tmpEta >= etaI2) & (tmpEta <= etaF2) + ) tmpRho = tmpRho[inSector2] tmpI = tmpI[inSector2] @@ -673,14 +726,14 @@ def polarRebin(thisFrame, binId = np.floor((tmpRho - startRho) / deltaRho) nSubpixelsIn = len(binId) - if (nSubpixelsIn > 0): + if nSubpixelsIn > 0: tmpI = sparse.csc_matrix( (tmpI, (binId, np.arange(nSubpixelsIn))), - shape=(numRho, nSubpixelsIn) + shape=(numRho, nSubpixelsIn), ) binId = sparse.csc_matrix( (np.ones(nSubpixelsIn), (binId, np.arange(nSubpixelsIn))), - shape=(numRho, nSubpixelsIn) + shape=(numRho, nSubpixelsIn), ) # Normalized contribution to the ith sector's radial bins @@ -688,8 +741,9 @@ def polarRebin(thisFrame, whereNZ = np.asarray( np.not_equal(polImg['intensity'][i, :], binIdSum) ) - polImg['intensity'][i, whereNZ] = \ + polImg['intensity'][i, whereNZ] = ( np.asarray(tmpI.sum(1))[whereNZ].flatten() / binIdSum[whereNZ] + ) return polImg @@ -712,8 +766,8 @@ def arccosSafe(temp): print("attempt to take arccos of %s" % temp, file=sys.stderr) raise RuntimeError("unrecoverable error") - gte1 = temp >= 1. - lte1 = temp <= -1. 
+ gte1 = temp >= 1.0 + lte1 = temp <= -1.0 temp[gte1] = 1 temp[lte1] = -1 @@ -747,7 +801,7 @@ def angularDifference(angList0, angList1, units=angularUnits): period = periodDict[units] # module-level # take difference as arrays diffAngles = np.atleast_1d(angList0) - np.atleast_1d(angList1) - return abs(np.remainder(diffAngles + 0.5*period, period) - 0.5*period) + return abs(np.remainder(diffAngles + 0.5 * period, period) - 0.5 * period) def mapAngle(ang, *args, **kwargs): @@ -764,14 +818,16 @@ def mapAngle(ang, *args, **kwargs): if kwargKeys[iArg] == 'units': units = kwargs[kwargKeys[iArg]] else: - raise RuntimeError("Unknown keyword argument: " - + str(kwargKeys[iArg])) + raise RuntimeError( + "Unknown keyword argument: " + str(kwargKeys[iArg]) + ) try: period = periodDict[units.lower()] - except(KeyError): - raise RuntimeError("unknown angular units: " - + str(kwargs[kwargKeys[iArg]])) + except KeyError: + raise RuntimeError( + "unknown angular units: " + str(kwargs[kwargKeys[iArg]]) + ) ang = np.asarray(ang, dtype=float) @@ -800,7 +856,7 @@ def mapAngle(ang, *args, **kwargs): pass retval = ang else: - retval = np.mod(ang + 0.5*period, period) - 0.5*period + retval = np.mod(ang + 0.5 * period, period) - 0.5 * period return retval @@ -861,9 +917,11 @@ def columnNorm(a): normalize array of column vectors (hstacked, axis = 0) """ if len(a.shape) > 2: - raise RuntimeError("incorrect shape: arg must be 1-d or 2-d, " - + "yours is %d" % (len(a.shape))) - cnrma = np.sqrt(sum(np.asarray(a)**2, 0)) + raise RuntimeError( + "incorrect shape: arg must be 1-d or 2-d, " + + "yours is %d" % (len(a.shape)) + ) + cnrma = np.sqrt(sum(np.asarray(a) ** 2, 0)) return cnrma @@ -872,9 +930,11 @@ def rowNorm(a): normalize array of row vectors (vstacked, axis = 1) """ if len(a.shape) > 2: - raise RuntimeError("incorrect shape: arg must be 1-d or 2-d, " - + "yours is %d" % (len(a.shape))) - cnrma = np.sqrt(sum(np.asarray(a)**2, 1)) + raise RuntimeError( + "incorrect shape: arg must be 1-d or 2-d, " + + "yours is %d" % (len(a.shape)) + ) + cnrma = np.sqrt(sum(np.asarray(a) ** 2, 1)) return cnrma @@ -883,7 +943,7 @@ def _unitVectorSingle(a, b): n = a.shape[0] nrm = 0.0 for i in range(n): - nrm += a[i]*a[i] + nrm += a[i] * a[i] nrm = np.sqrt(nrm) # prevent divide by zero if nrm > epsf: @@ -893,6 +953,7 @@ def _unitVectorSingle(a, b): for i in range(n): b[i] = a[i] + @numba.njit(nogil=True, cache=True) def _unitVectorMulti(a, b): n = a.shape[0] @@ -900,7 +961,7 @@ def _unitVectorMulti(a, b): for j in range(m): nrm = 0.0 for i in range(n): - nrm += a[i, j]*a[i, j] + nrm += a[i, j] * a[i, j] nrm = np.sqrt(nrm) # prevent divide by zero if nrm > epsf: @@ -910,6 +971,7 @@ def _unitVectorMulti(a, b): for i in range(n): b[i, j] = a[i, j] + def unitVector(a): """ normalize array of column vectors (hstacked, axis = 0) @@ -920,8 +982,10 @@ def unitVector(a): elif a.ndim == 2: _unitVectorMulti(a, result) else: - raise ValueError("incorrect arg shape; must be 1-d or 2-d, " - + "yours is %d-d" % (a.ndim)) + raise ValueError( + "incorrect arg shape; must be 1-d or 2-d, " + + "yours is %d-d" % (a.ndim) + ) return result @@ -942,19 +1006,13 @@ def makeDetectorRotMat(tiltAngles): sin_gZ = np.sin(tiltAngles[2]) rotXl = np.array( - [[1., 0., 0.], - [0., cos_gX, -sin_gX], - [0., sin_gX, cos_gX]] + [[1.0, 0.0, 0.0], [0.0, cos_gX, -sin_gX], [0.0, sin_gX, cos_gX]] ) rotYl = np.array( - [[cos_gY, 0., sin_gY], - [0., 1., 0.], - [-sin_gY, 0., cos_gY]] + [[cos_gY, 0.0, sin_gY], [0.0, 1.0, 0.0], [-sin_gY, 0.0, cos_gY]] ) rotZl = np.array( - 
[[cos_gZ, -sin_gZ, 0.], - [sin_gZ, cos_gZ, 0.], - [0., 0., 1.]] + [[cos_gZ, -sin_gZ, 0.0], [sin_gZ, cos_gZ, 0.0], [0.0, 0.0, 1.0]] ) return np.dot(rotZl, np.dot(rotYl, rotXl)) @@ -969,17 +1027,9 @@ def makeOscillRotMat(oscillAngles): come = np.cos(oscillAngles[1]) some = np.sin(oscillAngles[1]) - rchi = np.array( - [[1., 0., 0.], - [0., cchi, -schi], - [0., schi, cchi]] - ) + rchi = np.array([[1.0, 0.0, 0.0], [0.0, cchi, -schi], [0.0, schi, cchi]]) - rome = np.array( - [[come, 0., some], - [0., 1., 0.], - [-some, 0., come]] - ) + rome = np.array([[come, 0.0, some], [0.0, 1.0, 0.0], [-some, 0.0, come]]) return np.dot(rchi, rome) @@ -1003,13 +1053,18 @@ def makeRotMatOfExpMap(expMap): phi = np.norm(expMap) if phi > epsf: wMat = np.array( - [[0., -expMap[2], expMap[1]], - [expMap[2], 0., -expMap[0]], - [-expMap[1], expMap[0], 0.]]) + [ + [0.0, -expMap[2], expMap[1]], + [expMap[2], 0.0, -expMap[0]], + [-expMap[1], expMap[0], 0.0], + ] + ) - rMat = I3 \ - + (np.sin(phi) / phi) * wMat \ - + ((1. - np.cos(phi)) / (phi*phi)) * np.dot(wMat, wMat) + rMat = ( + I3 + + (np.sin(phi) / phi) * wMat + + ((1.0 - np.cos(phi)) / (phi * phi)) * np.dot(wMat, wMat) + ) else: rMat = I3 @@ -1017,27 +1072,26 @@ def makeRotMatOfExpMap(expMap): def makeBinaryRotMat(axis): - """ - """ + """ """ n = np.asarray(axis).flatten() assert len(n) == 3, 'Axis input does not have 3 components' - return 2*np.dot(n.reshape(3, 1), n.reshape(1, 3)) - I3 + return 2 * np.dot(n.reshape(3, 1), n.reshape(1, 3)) - I3 @numba.njit(nogil=True, cache=True) def _makeEtaFrameRotMat(bHat_l, eHat_l, out): # bHat_l and eHat_l CANNOT have 0 magnitude! # must catch this case as well as colinear bHat_l/eHat_l elsewhere... - bHat_mag = np.sqrt(bHat_l[0]**2 + bHat_l[1]**2 + bHat_l[2]**2) + bHat_mag = np.sqrt(bHat_l[0] ** 2 + bHat_l[1] ** 2 + bHat_l[2] ** 2) # assign Ze as -bHat_l for i in range(3): out[i, 2] = -bHat_l[i] / bHat_mag # find Ye as Ze ^ eHat_l - Ye0 = out[1, 2]*eHat_l[2] - eHat_l[1]*out[2, 2] - Ye1 = out[2, 2]*eHat_l[0] - eHat_l[2]*out[0, 2] - Ye2 = out[0, 2]*eHat_l[1] - eHat_l[0]*out[1, 2] + Ye0 = out[1, 2] * eHat_l[2] - eHat_l[1] * out[2, 2] + Ye1 = out[2, 2] * eHat_l[0] - eHat_l[2] * out[0, 2] + Ye2 = out[0, 2] * eHat_l[1] - eHat_l[0] * out[1, 2] Ye_mag = np.sqrt(Ye0**2 + Ye1**2 + Ye2**2) @@ -1046,9 +1100,10 @@ def _makeEtaFrameRotMat(bHat_l, eHat_l, out): out[2, 1] = Ye2 / Ye_mag # find Xe as Ye ^ Ze - out[0, 0] = out[1, 1]*out[2, 2] - out[1, 2]*out[2, 1] - out[1, 0] = out[2, 1]*out[0, 2] - out[2, 2]*out[0, 1] - out[2, 0] = out[0, 1]*out[1, 2] - out[0, 2]*out[1, 1] + out[0, 0] = out[1, 1] * out[2, 2] - out[1, 2] * out[2, 1] + out[1, 0] = out[2, 1] * out[0, 2] - out[2, 2] * out[0, 1] + out[2, 0] = out[0, 1] * out[1, 2] - out[0, 2] * out[1, 1] + def makeEtaFrameRotMat(bHat_l, eHat_l): """ @@ -1074,8 +1129,8 @@ def angles_in_range(angles, starts, stops, degrees=True): OPTIONAL ARGS: *degrees* - [True] angles & ranges in degrees (or radians) -""" - TAU = 360.0 if degrees else 2*np.pi + """ + TAU = 360.0 if degrees else 2 * np.pi nw = len(starts) na = len(angles) in_range = np.zeros((na), dtype=bool) @@ -1100,13 +1155,14 @@ def validateAngleRanges(angList, startAngs, stopAngs, ccw=True): the same; we treat them as implying 2*pi having been mapped """ # Prefer ravel over flatten because flatten never skips the copy - angList = np.asarray(angList).ravel() # needs to have len + angList = np.asarray(angList).ravel() # needs to have len startAngs = np.asarray(startAngs).ravel() # needs to have len - stopAngs = np.asarray(stopAngs).ravel() # 
needs to have len

     n_ranges = len(startAngs)
-    assert len(stopAngs) == n_ranges, \
-        "length of min and max angular limits must match!"
+    assert (
+        len(stopAngs) == n_ranges
+    ), "length of min and max angular limits must match!"

     # to avoid warnings in >=, <= later down, mark nans;
     # need these to trick output to False in the case of nan input
@@ -1115,7 +1171,7 @@ def validateAngleRanges(angList, startAngs, stopAngs, ccw=True):
     reflInRange = np.zeros(angList.shape, dtype=bool)

     # bin length for chunking
-    binLen = np.pi / 2.
+    binLen = np.pi / 2.0

     # in plane vectors defining wedges
     x0 = np.vstack([np.cos(startAngs), np.sin(startAngs)])
@@ -1123,37 +1179,35 @@ def validateAngleRanges(angList, startAngs, stopAngs, ccw=True):

     # dot products
     dp = np.sum(x0 * x1, axis=0)
-    if np.any(dp >= 1. - sqrt_epsf) and n_ranges > 1:
+    if np.any(dp >= 1.0 - sqrt_epsf) and n_ranges > 1:
         # ambiguous case
         raise RuntimeError(
             "At least one of your ranges is already 360 degrees!"
         )
-    elif dp[0] >= 1. - sqrt_epsf and n_ranges == 1:
+    elif dp[0] >= 1.0 - sqrt_epsf and n_ranges == 1:
         # trivial case!
         reflInRange = np.ones(angList.shape, dtype=bool)
         reflInRange[nan_mask] = False
     else:
         # solve for arc lengths
         # ...note: no zeros should have made it here
-        a = x0[0, :]*x1[1, :] - x0[1, :]*x1[0, :]
-        b = x0[0, :]*x1[0, :] + x0[1, :]*x1[1, :]
+        a = x0[0, :] * x1[1, :] - x0[1, :] * x1[0, :]
+        b = x0[0, :] * x1[0, :] + x0[1, :] * x1[1, :]
         phi = np.arctan2(b, a)

-        arclen = 0.5*np.pi - phi  # these are clockwise
+        arclen = 0.5 * np.pi - phi  # these are clockwise
         cw_phis = arclen < 0
-        arclen[cw_phis] = 2*np.pi + arclen[cw_phis]   # all positive (CW) now
+        arclen[cw_phis] = 2 * np.pi + arclen[cw_phis]  # all positive (CW) now
         if not ccw:
-            arclen = 2*np.pi - arclen
+            arclen = 2 * np.pi - arclen

-        if sum(arclen) > 2*np.pi:
-            raise RuntimeWarning(
-                "Specified angle ranges sum to > 360 degrees"
-            )
+        if sum(arclen) > 2 * np.pi:
+            raise RuntimeWarning("Specified angle ranges sum to > 360 degrees")

         # check that there are no more than 2*pi of total range
         dp = np.zeros(n_ranges)
         for i in range(n_ranges):
             # number of subranges using 'binLen'
-            numSubranges = int(np.ceil(arclen[i]/binLen))
+            numSubranges = int(np.ceil(arclen[i] / binLen))

             # check remainder
             binrem = np.remainder(arclen[i], binLen)
@@ -1170,23 +1224,25 @@ def validateAngleRanges(angList, startAngs, stopAngs, ccw=True):
             # Create sub ranges on the fly to avoid ambiguity in dot product
             # for wedges >= 180 degrees
             subRanges = np.array(
-                [startAngs[i] + binLen*j for j in range(numSubranges)]
-                + [startAngs[i] + binLen*(numSubranges - 1) + finalBinLen]
+                [startAngs[i] + binLen * j for j in range(numSubranges)]
+                + [startAngs[i] + binLen * (numSubranges - 1) + finalBinLen]
             )

             for k in range(numSubranges):
                 zStart = _z_project(angList, subRanges[k])
                 zStop = _z_project(angList, subRanges[k + 1])
                 if ccw:
-                    zStart[nan_mask] = 999.
-                    zStop[nan_mask] = -999.
-                    reflInRange = reflInRange | \
-                        np.logical_and(zStart <= 0, zStop >= 0)
+                    zStart[nan_mask] = 999.0
+                    zStop[nan_mask] = -999.0
+                    reflInRange = reflInRange | np.logical_and(
+                        zStart <= 0, zStop >= 0
+                    )
                 else:
-                    zStart[nan_mask] = -999.
-                    zStop[nan_mask] = 999.
- reflInRange = reflInRange | \ - np.logical_and(zStart >= 0, zStop <= 0) + zStart[nan_mask] = -999.0 + zStop[nan_mask] = 999.0 + reflInRange = reflInRange | np.logical_and( + zStart >= 0, zStop <= 0 + ) return reflInRange @@ -1213,33 +1269,50 @@ def rotate_vecs_about_axis(angle, axis, vecs): angle = np.atleast_1d(angle) # quaternion components - q0 = np.cos(0.5*angle) - q1 = np.sin(0.5*angle) + q0 = np.cos(0.5 * angle) + q1 = np.sin(0.5 * angle) qv = np.tile(q1, (3, 1)) * axis # component perpendicular to axes (inherits shape of vecs) - vp0 = vecs[0, :] - axis[0, :]*axis[0, :]*vecs[0, :] \ - - axis[0, :]*axis[1, :]*vecs[1, :] - axis[0, :]*axis[2, :]*vecs[2, :] - vp1 = vecs[1, :] - axis[1, :]*axis[1, :]*vecs[1, :] \ - - axis[1, :]*axis[0, :]*vecs[0, :] - axis[1, :]*axis[2, :]*vecs[2, :] - vp2 = vecs[2, :] - axis[2, :]*axis[2, :]*vecs[2, :] \ - - axis[2, :]*axis[0, :]*vecs[0, :] - axis[2, :]*axis[1, :]*vecs[1, :] + vp0 = ( + vecs[0, :] + - axis[0, :] * axis[0, :] * vecs[0, :] + - axis[0, :] * axis[1, :] * vecs[1, :] + - axis[0, :] * axis[2, :] * vecs[2, :] + ) + vp1 = ( + vecs[1, :] + - axis[1, :] * axis[1, :] * vecs[1, :] + - axis[1, :] * axis[0, :] * vecs[0, :] + - axis[1, :] * axis[2, :] * vecs[2, :] + ) + vp2 = ( + vecs[2, :] + - axis[2, :] * axis[2, :] * vecs[2, :] + - axis[2, :] * axis[0, :] * vecs[0, :] + - axis[2, :] * axis[1, :] * vecs[1, :] + ) # dot product with components along; cross product with components normal - qdota = (axis[0, :] * vecs[0, :] + - axis[1, :] * vecs[1, :] + - axis[2, :] * vecs[2, :])\ - * (axis[0, :] * qv[0, :] + - axis[1, :] * qv[1, :] + - axis[2, :] * qv[2, :]) - qcrossn = np.vstack([qv[1, :]*vp2 - qv[2, :]*vp1, - qv[2, :]*vp0 - qv[0, :]*vp2, - qv[0, :]*vp1 - qv[1, :]*vp0]) + qdota = ( + axis[0, :] * vecs[0, :] + + axis[1, :] * vecs[1, :] + + axis[2, :] * vecs[2, :] + ) * (axis[0, :] * qv[0, :] + axis[1, :] * qv[1, :] + axis[2, :] * qv[2, :]) + qcrossn = np.vstack( + [ + qv[1, :] * vp2 - qv[2, :] * vp1, + qv[2, :] * vp0 - qv[0, :] * vp2, + qv[0, :] * vp1 - qv[1, :] * vp0, + ] + ) # quaternion formula - v_rot = np.tile(q0*q0 - q1*q1, (3, 1)) * vecs \ - + 2. * np.tile(qdota, (3, 1)) * qv \ - + 2. 
* np.tile(q0, (3, 1)) * qcrossn + v_rot = ( + np.tile(q0 * q0 - q1 * q1, (3, 1)) * vecs + + 2.0 * np.tile(qdota, (3, 1)) * qv + + 2.0 * np.tile(q0, (3, 1)) * qcrossn + ) return v_rot @@ -1272,24 +1345,27 @@ def quat_product_matrix(q, mult='right'): """ if mult == 'right': qmat = np.array( - [[q[0], -q[1], -q[2], -q[3]], - [q[1], q[0], q[3], -q[2]], - [q[2], -q[3], q[0], q[1]], - [q[3], q[2], -q[1], q[0]]] + [ + [q[0], -q[1], -q[2], -q[3]], + [q[1], q[0], q[3], -q[2]], + [q[2], -q[3], q[0], q[1]], + [q[3], q[2], -q[1], q[0]], + ] ) elif mult == 'left': qmat = np.array( - [[q[0], -q[1], -q[2], -q[3]], - [q[1], q[0], -q[3], q[2]], - [q[2], q[3], q[0], -q[1]], - [q[3], -q[2], q[1], q[0]]] + [ + [q[0], -q[1], -q[2], -q[3]], + [q[1], q[0], -q[3], q[2]], + [q[2], q[3], q[0], -q[1]], + [q[3], -q[2], q[1], q[0]], + ] ) return qmat def quat_distance(q1, q2, qsym): - """ - """ + """ """ # qsym from PlaneData objects are (4, nsym) # convert symmetries to (4, 4) qprod matrices nsym = qsym.shape[1] @@ -1299,8 +1375,7 @@ def quat_distance(q1, q2, qsym): # inverse of q1 in matrix form q1i = quat_product_matrix( - np.r_[1, -1, -1, -1] * np.atleast_1d(q1).flatten(), - mult='right' + np.r_[1, -1, -1, -1] * np.atleast_1d(q1).flatten(), mult='right' ) # Do R * Gc, store as vstacked equivalent quaternions (nsym, 4) diff --git a/hexrd/wppf/derivatives.py b/hexrd/wppf/derivatives.py index ffb247fc5..d1e4cce33 100644 --- a/hexrd/wppf/derivatives.py +++ b/hexrd/wppf/derivatives.py @@ -1,6 +1,7 @@ import numpy as np from numba import njit from hexrd.wppf.peakfunctions import _unit_gaussian, _unit_lorentzian + """ naming convention for the derivative is as follows: _d__ @@ -18,102 +19,127 @@ """ + @njit(cache=True, nogil=True) def _d_pvfcj_fwhm(): pass + @njit(cache=True, nogil=True) def _d_pvfcj_tth(): pass + @njit(cache=True, nogil=True) def _d_fwhm_U(): pass + @njit(cache=True, nogil=True) def _d_fwhm_V(): pass + @njit(cache=True, nogil=True) def _d_fwhm_W(): pass + @njit(cache=True, nogil=True) def _d_fwhm_P(): pass + @njit(cache=True, nogil=True) def _d_fwhm_X(): pass + @njit(cache=True, nogil=True) def _d_fwhm_Y(): pass + @njit(cache=True, nogil=True) def _d_fwhm_Xe(): pass + @njit(cache=True, nogil=True) def _d_fwhm_Ye(): pass + @njit(cache=True, nogil=True) def _d_fwhm_Xs(): pass + @njit(cache=True, nogil=True) def _d_fwhm_HL(): pass + @njit(cache=True, nogil=True) def _d_fwhm_SL(): pass + @njit(cache=True, nogil=True) def _d_pvfcj_scale(): pass + @njit(cache=True, nogil=True) def _d_pvfcj_phase_fraction(): pass + @njit(cache=True, nogil=True) def _d_pvfcj_trns(): pass + @njit(cache=True, nogil=True) def _d_pvfcj_shft(): pass + @njit(cache=True, nogil=True) def _d_pvfcj_zero_error(): pass + @njit(cache=True, nogil=True) def _d_pvfcj_shkls(): pass + @njit(cache=True, nogil=True) def _d_pvfcj_a(): pass + @njit(cache=True, nogil=True) def _d_pvfcj_b(): pass + @njit(cache=True, nogil=True) def _d_pvfcj_c(): pass + @njit(cache=True, nogil=True) def _d_pvfcj_alpha(): pass + @njit(cache=True, nogil=True) def _d_pvfcj_beta(): pass + @njit(cache=True, nogil=True) def _d_pvfcj_gamma(): pass diff --git a/hexrd/wppf/peakfunctions.py b/hexrd/wppf/peakfunctions.py index 0e2d2b66f..4e02b2dbb 100644 --- a/hexrd/wppf/peakfunctions.py +++ b/hexrd/wppf/peakfunctions.py @@ -30,6 +30,7 @@ from hexrd import constants from numba import vectorize, float64, njit, prange from hexrd.fitting.peakfunctions import erfc, exp1exp + # from scipy.special import erfc, exp1 # addr = get_cython_function_address("scipy.special.cython_special", 
"exp1") @@ -37,7 +38,7 @@ # exp1_fn = functype(addr) gauss_width_fact = constants.sigma_to_fwhm -lorentz_width_fact = 2. +lorentz_width_fact = 2.0 # FIXME: we need this for the time being to be able to parse multipeak fitting # results; need to wrap all this up in a class in the future! @@ -45,19 +46,16 @@ 'gaussian': 3, 'lorentzian': 3, 'pvoigt': 4, - 'split_pvoigt': 6 + 'split_pvoigt': 6, } """ Calgliotti and Lorentzian FWHM functions """ + + @njit(cache=True, nogil=True) -def _gaussian_fwhm(uvw, - P, - gamma_ani_sqr, - eta_mixing, - tth, - dsp): +def _gaussian_fwhm(uvw, P, gamma_ani_sqr, eta_mixing, tth, dsp): """ @AUTHOR: Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov @DATE: 05/20/2020 SS 1.0 original @@ -70,24 +68,19 @@ def _gaussian_fwhm(uvw, dsp d-spacing """ U, V, W = uvw - th = np.radians(0.5*tth) + th = np.radians(0.5 * tth) tanth = np.tan(th) - cth2 = np.cos(th)**2.0 - sig2_ani = gamma_ani_sqr*(1.-eta_mixing)**2*dsp**4 - sigsqr = (U+sig2_ani) * tanth**2 + V * tanth + W + P/cth2 - if(sigsqr <= 0.): + cth2 = np.cos(th) ** 2.0 + sig2_ani = gamma_ani_sqr * (1.0 - eta_mixing) ** 2 * dsp**4 + sigsqr = (U + sig2_ani) * tanth**2 + V * tanth + W + P / cth2 + if sigsqr <= 0.0: sigsqr = 1.0e-12 - return np.sqrt(sigsqr)*1e-2 + return np.sqrt(sigsqr) * 1e-2 @njit(cache=True, nogil=True) -def _lorentzian_fwhm(xy, - xy_sf, - gamma_ani_sqr, - eta_mixing, - tth, - dsp): +def _lorentzian_fwhm(xy, xy_sf, gamma_ani_sqr, eta_mixing, tth, dsp): """ @AUTHOR: Saransh Singh, Lawrence Livermore National Lab, saransh1@llnl.gov @DATE: 07/20/2020 SS 1.0 original @@ -102,12 +95,13 @@ def _lorentzian_fwhm(xy, else regular broadening """ X, Y = xy - th = np.radians(0.5*tth) + th = np.radians(0.5 * tth) tanth = np.tan(th) cth = np.cos(th) - sig_ani = np.sqrt(gamma_ani_sqr)*eta_mixing*dsp**2 - gamma = (X+xy_sf)/cth + (Y+sig_ani)*tanth - return gamma*1e-2 + sig_ani = np.sqrt(gamma_ani_sqr) * eta_mixing * dsp**2 + gamma = (X + xy_sf) / cth + (Y + sig_ani) * tanth + return gamma * 1e-2 + @njit(cache=True, nogil=True) def _anisotropic_peak_broadening(shkl, hkl): @@ -125,36 +119,50 @@ def _anisotropic_peak_broadening(shkl, hkl): "s310", "s103", "s031", "s130", "s301", "s013", "s211", "s121", "s112"] """ - h,k,l = hkl - gamma_sqr = (shkl[0]*h**4 + - shkl[1]*k**4 + - shkl[2]*l**4 + - 3.0*(shkl[3]*(h*k)**2 + - shkl[4]*(h*l)**2 + - shkl[5]*(k*l)**2)+ - 2.0*(shkl[6]*k*h**3 + - shkl[7]*h*l**3 + - shkl[8]*l*k**3 + - shkl[9]*h*k**3 + - shkl[10]*l*h**3 + - shkl[11]*k*l**3) + - 4.0*(shkl[12]*k*l*h**2 + - shkl[13]*h*l*k**2 + - shkl[14]*h*k*l**2)) + h, k, l = hkl + gamma_sqr = ( + shkl[0] * h**4 + + shkl[1] * k**4 + + shkl[2] * l**4 + + 3.0 + * ( + shkl[3] * (h * k) ** 2 + + shkl[4] * (h * l) ** 2 + + shkl[5] * (k * l) ** 2 + ) + + 2.0 + * ( + shkl[6] * k * h**3 + + shkl[7] * h * l**3 + + shkl[8] * l * k**3 + + shkl[9] * h * k**3 + + shkl[10] * l * h**3 + + shkl[11] * k * l**3 + ) + + 4.0 + * ( + shkl[12] * k * l * h**2 + + shkl[13] * h * l * k**2 + + shkl[14] * h * k * l**2 + ) + ) return gamma_sqr + def _anisotropic_gaussian_component(gamma_sqr, eta_mixing): """ gaussian component in anisotropic broadening """ - return gamma_sqr*(1. 
- eta_mixing)**2 + return gamma_sqr * (1.0 - eta_mixing) ** 2 + def _anisotropic_lorentzian_component(gamma_sqr, eta_mixing): """ lorentzian component in anisotropic broadening """ - return np.sqrt(gamma_sqr)*eta_mixing + return np.sqrt(gamma_sqr) * eta_mixing + # ============================================================================= # 1-D Gaussian Functions @@ -175,11 +183,12 @@ def _unit_gaussian(p, x): x0 = p[0] FWHM = p[1] - sigma = FWHM/gauss_width_fact + sigma = FWHM / gauss_width_fact - f = np.exp(-(x-x0)**2/(2.*sigma**2.)) + f = np.exp(-((x - x0) ** 2) / (2.0 * sigma**2.0)) return f + # ============================================================================= # 1-D Lorentzian Functions # ============================================================================= @@ -197,11 +206,12 @@ def _unit_lorentzian(p, x): x0 = p[0] FWHM = p[1] - gamma = FWHM/lorentz_width_fact + gamma = FWHM / lorentz_width_fact - f = gamma / ((x-x0)**2 + gamma**2) + f = gamma / ((x - x0) ** 2 + gamma**2) return f + @njit(cache=True, nogil=True) def _mixing_factor_pv(fwhm_g, fwhm_l): """ @@ -214,99 +224,95 @@ def _mixing_factor_pv(fwhm_g, fwhm_l): @DETAILS: calculates the mixing factor eta to best approximate voight peak shapes """ - fwhm = fwhm_g**5 + 2.69269 * fwhm_g**4 * fwhm_l + \ - 2.42843 * fwhm_g**3 * fwhm_l**2 + \ - 4.47163 * fwhm_g**2 * fwhm_l**3 +\ - 0.07842 * fwhm_g * fwhm_l**4 +\ - fwhm_l**5 + fwhm = ( + fwhm_g**5 + + 2.69269 * fwhm_g**4 * fwhm_l + + 2.42843 * fwhm_g**3 * fwhm_l**2 + + 4.47163 * fwhm_g**2 * fwhm_l**3 + + 0.07842 * fwhm_g * fwhm_l**4 + + fwhm_l**5 + ) fwhm = fwhm**0.20 - eta = 1.36603 * (fwhm_l/fwhm) - \ - 0.47719 * (fwhm_l/fwhm)**2 + \ - 0.11116 * (fwhm_l/fwhm)**3 - if eta < 0.: - eta = 0. - elif eta > 1.: - eta = 1. + eta = ( + 1.36603 * (fwhm_l / fwhm) + - 0.47719 * (fwhm_l / fwhm) ** 2 + + 0.11116 * (fwhm_l / fwhm) ** 3 + ) + if eta < 0.0: + eta = 0.0 + elif eta > 1.0: + eta = 1.0 return eta, fwhm + @njit(cache=True, nogil=True) -def pvoight_wppf(uvw, - p, - xy, - xy_sf, - shkl, - eta_mixing, - tth, - dsp, - hkl, - tth_list): +def pvoight_wppf(uvw, p, xy, xy_sf, shkl, eta_mixing, tth, dsp, hkl, tth_list): """ @author Saransh Singh, Lawrence Livermore National Lab @date 03/22/2021 SS 1.0 original @details pseudo voight peak profile for WPPF """ - gamma_ani_sqr = _anisotropic_peak_broadening( - shkl, hkl) - fwhm_g = _gaussian_fwhm(uvw, p, - gamma_ani_sqr, - eta_mixing, - tth, dsp) - fwhm_l = _lorentzian_fwhm(xy, xy_sf, - gamma_ani_sqr, eta_mixing, - tth, dsp) + gamma_ani_sqr = _anisotropic_peak_broadening(shkl, hkl) + fwhm_g = _gaussian_fwhm(uvw, p, gamma_ani_sqr, eta_mixing, tth, dsp) + fwhm_l = _lorentzian_fwhm(xy, xy_sf, gamma_ani_sqr, eta_mixing, tth, dsp) n, fwhm = _mixing_factor_pv(fwhm_g, fwhm_l) - Ag = 0.9394372787/fwhm # normalization factor for unit area - Al = 1.0/np.pi # normalization factor for unit area + Ag = 0.9394372787 / fwhm # normalization factor for unit area + Al = 1.0 / np.pi # normalization factor for unit area + + g = Ag * _unit_gaussian(np.array([tth, fwhm]), tth_list) + l = Al * _unit_lorentzian(np.array([tth, fwhm]), tth_list) - g = Ag*_unit_gaussian(np.array([tth, fwhm]), tth_list) - l = Al*_unit_lorentzian(np.array([tth, fwhm]), tth_list) + return n * l + (1.0 - n) * g - return n*l + (1.0-n)*g @njit(cache=True, nogil=True) def _func_h(tau, tth_r): - cph = np.cos(tth_r - tau) + cph = np.cos(tth_r - tau) ctth = np.cos(tth_r) - return np.sqrt( (cph/ctth)**2 - 1.) 
+ return np.sqrt((cph / ctth) ** 2 - 1.0) + @njit(cache=True, nogil=True) def _func_W(HoL, SoL, tau, tau_min, tau_infl, tth): - if tth < np.pi/2.0: - if tau >= 0. and tau <= tau_infl: - res = 2.0*min(HoL,SoL) + if tth < np.pi / 2.0: + if tau >= 0.0 and tau <= tau_infl: + res = 2.0 * min(HoL, SoL) elif tau > tau_infl and tau <= tau_min: - res = HoL+SoL+_func_h(tau,tth) + res = HoL + SoL + _func_h(tau, tth) else: res = 0.0 else: - if tau <= 0. and tau >= tau_infl: - res = 2.0*min(HoL,SoL) + if tau <= 0.0 and tau >= tau_infl: + res = 2.0 * min(HoL, SoL) elif tau < tau_infl and tau >= tau_min: - res = HoL+SoL+_func_h(tau,tth) + res = HoL + SoL + _func_h(tau, tth) else: res = 0.0 return res + @njit(cache=True, nogil=True) -def pvfcj(uvw, - p, - xy, - xy_sf, - shkl, - eta_mixing, - tth, - dsp, - hkl, - tth_list, - HoL, - SoL, - xn, - wn): +def pvfcj( + uvw, + p, + xy, + xy_sf, + shkl, + eta_mixing, + tth, + dsp, + hkl, + tth_list, + HoL, + SoL, + xn, + wn, +): """ @author Saransh Singh, Lawrence Livermore National Lab @date 04/02/2021 SS 1.0 original @@ -319,63 +325,63 @@ def pvfcj(uvw, tth_r = np.radians(tth) ctth = np.cos(tth_r) - arg = ctth*np.sqrt(((HoL+SoL)**2+1.)) + arg = ctth * np.sqrt(((HoL + SoL) ** 2 + 1.0)) cinv = np.arccos(arg) tau_min = tth_r - cinv # two theta of inflection point - arg = ctth*np.sqrt(((HoL-SoL)**2+1.)) + arg = ctth * np.sqrt(((HoL - SoL) ** 2 + 1.0)) cinv = np.arccos(arg) tau_infl = tth_r - cinv - tau = tau_min*xn + tau = tau_min * xn cx = np.cos(tau) res = np.zeros(tth_list.shape) den = 0.0 for i in np.arange(tau.shape[0]): - x = tth_r-tau[i] + x = tth_r - tau[i] xx = tau[i] - W = _func_W(HoL,SoL,xx,tau_min,tau_infl,tth_r) + W = _func_W(HoL, SoL, xx, tau_min, tau_infl, tth_r) h = _func_h(xx, tth_r) - fact = wn[i]*(W/h/cx[i]) + fact = wn[i] * (W / h / cx[i]) den += fact - pv = pvoight_wppf(uvw, - p, - xy, - xy_sf, - shkl, - eta_mixing, - np.degrees(x), - dsp, - hkl, - tth_list) - res += pv*fact - - res = np.sin(tth_r)*res/den/4./HoL/SoL + pv = pvoight_wppf( + uvw, + p, + xy, + xy_sf, + shkl, + eta_mixing, + np.degrees(x), + dsp, + hkl, + tth_list, + ) + res += pv * fact + + res = np.sin(tth_r) * res / den / 4.0 / HoL / SoL a = np.trapz(res, tth_list) - return res/a + return res / a + @njit(cache=True, nogil=True) def _calc_alpha(alpha, tth): a0, a1 = alpha - return (a0 + a1*np.tan(np.radians(0.5*tth))) + return a0 + a1 * np.tan(np.radians(0.5 * tth)) @njit(cache=True, nogil=True) def _calc_beta(beta, tth): b0, b1 = beta - return b0 + b1*np.tan(np.radians(0.5*tth)) + return b0 + b1 * np.tan(np.radians(0.5 * tth)) + @njit(cache=True, nogil=True) -def _gaussian_pink_beam(alpha, - beta, - fwhm_g, - tth, - tth_list): +def _gaussian_pink_beam(alpha, beta, fwhm_g, tth, tth_list): """ @author Saransh Singh, Lawrence Livermore National Lab @date 03/22/2021 SS 1.0 original @@ -386,32 +392,28 @@ def _gaussian_pink_beam(alpha, """ del_tth = tth_list - tth sigsqr = fwhm_g**2 - f1 = alpha*sigsqr + 2.0*del_tth - f2 = beta*sigsqr - 2.0*del_tth - f3 = np.sqrt(2.0)*fwhm_g - u = 0.5*alpha*f1 - v = 0.5*beta*f2 - y = (f1-del_tth)/f3 - z = (f2+del_tth)/f3 + f1 = alpha * sigsqr + 2.0 * del_tth + f2 = beta * sigsqr - 2.0 * del_tth + f3 = np.sqrt(2.0) * fwhm_g + u = 0.5 * alpha * f1 + v = 0.5 * beta * f2 + y = (f1 - del_tth) / f3 + z = (f2 + del_tth) / f3 t1 = erfc(y) t2 = erfc(z) g = np.zeros(tth_list.shape) - zmask = np.abs(del_tth) > 5.0 - g[~zmask] = (0.5*(alpha*beta)/(alpha + beta)) \ - * np.exp(u[~zmask])*t1[~zmask] + \ - np.exp(v[~zmask])*t2[~zmask] + zmask = np.abs(del_tth) > 5.0 + 
g[~zmask] = (0.5 * (alpha * beta) / (alpha + beta)) * np.exp( + u[~zmask] + ) * t1[~zmask] + np.exp(v[~zmask]) * t2[~zmask] mask = np.isnan(g) - g[mask] = 0. + g[mask] = 0.0 return g @njit(cache=True, nogil=True) -def _lorentzian_pink_beam(alpha, - beta, - fwhm_l, - tth, - tth_list): +def _lorentzian_pink_beam(alpha, beta, fwhm_l, tth, tth_list): """ @author Saransh Singh, Lawrence Livermore National Lab @date 03/22/2021 SS 1.0 original @@ -421,8 +423,8 @@ def _lorentzian_pink_beam(alpha, Von Dreele et. al., J. Appl. Cryst. (2021). 54, 3–6 """ del_tth = tth_list - tth - p = -alpha*del_tth + 1j * 0.5*alpha*fwhm_l - q = -beta*del_tth + 1j * 0.5*beta*fwhm_l + p = -alpha * del_tth + 1j * 0.5 * alpha * fwhm_l + q = -beta * del_tth + 1j * 0.5 * beta * fwhm_l y = np.zeros(tth_list.shape) @@ -431,26 +433,18 @@ def _lorentzian_pink_beam(alpha, # f1 = exp1(p) # f2 = exp1(q) - y = -(alpha*beta)/(np.pi*(alpha+beta))*(f1+f2).imag - + y = -(alpha * beta) / (np.pi * (alpha + beta)) * (f1 + f2).imag + mask = np.isnan(y) - y[mask] = 0. + y[mask] = 0.0 return y + @njit(cache=True, nogil=True) -def pvoight_pink_beam(alpha, - beta, - uvw, - p, - xy, - xy_sf, - shkl, - eta_mixing, - tth, - dsp, - hkl, - tth_list): +def pvoight_pink_beam( + alpha, beta, uvw, p, xy, xy_sf, shkl, eta_mixing, tth, dsp, hkl, tth_list +): """ @author Saransh Singh, Lawrence Livermore National Lab @date 03/22/2021 SS 1.0 original @@ -460,48 +454,43 @@ def pvoight_pink_beam(alpha, alpha_exp = _calc_alpha(alpha, tth) beta_exp = _calc_beta(beta, tth) - gamma_ani_sqr = _anisotropic_peak_broadening( - shkl, hkl) + gamma_ani_sqr = _anisotropic_peak_broadening(shkl, hkl) - fwhm_g = _gaussian_fwhm(uvw, p, - gamma_ani_sqr, - eta_mixing, - tth, dsp) - fwhm_l = _lorentzian_fwhm(xy, xy_sf, - gamma_ani_sqr, eta_mixing, - tth, dsp) + fwhm_g = _gaussian_fwhm(uvw, p, gamma_ani_sqr, eta_mixing, tth, dsp) + fwhm_l = _lorentzian_fwhm(xy, xy_sf, gamma_ani_sqr, eta_mixing, tth, dsp) n, fwhm = _mixing_factor_pv(fwhm_g, fwhm_l) - g = _gaussian_pink_beam(alpha_exp, beta_exp, - fwhm_g, tth, tth_list) - l = _lorentzian_pink_beam(alpha_exp, beta_exp, - fwhm_l, tth, tth_list) - ag = np.trapz(g,tth_list) - al = np.trapz(l,tth_list) + g = _gaussian_pink_beam(alpha_exp, beta_exp, fwhm_g, tth, tth_list) + l = _lorentzian_pink_beam(alpha_exp, beta_exp, fwhm_l, tth, tth_list) + ag = np.trapz(g, tth_list) + al = np.trapz(l, tth_list) if np.abs(ag) < 1e-6: ag = 1.0 if np.abs(al) < 1e-6: al = 1.0 - return n*l/al + (1.0-n)*g/ag + return n * l / al + (1.0 - n) * g / ag + @njit(cache=True, nogil=True, parallel=True) -def computespectrum_pvfcj(uvw, - p, - xy, - xy_sf, - shkl, - eta_mixing, - HL, - SL, - tth, - dsp, - hkl, - tth_list, - Iobs, - xn, - wn): +def computespectrum_pvfcj( + uvw, + p, + xy, + xy_sf, + shkl, + eta_mixing, + HL, + SL, + tth, + dsp, + hkl, + tth_list, + Iobs, + xn, + wn, +): """ @author Saransh Singh, Lawrence Livermore National Lab @date 03/31/2021 SS 1.0 original @@ -512,9 +501,9 @@ def computespectrum_pvfcj(uvw, """ spec = np.zeros(tth_list.shape) - nref = np.min(np.array([Iobs.shape[0], - tth.shape[0], - dsp.shape[0],hkl.shape[0]])) + nref = np.min( + np.array([Iobs.shape[0], tth.shape[0], dsp.shape[0], hkl.shape[0]]) + ) for ii in prange(nref): II = Iobs[ii] @@ -523,27 +512,18 @@ def computespectrum_pvfcj(uvw, g = hkl[ii] xs = xy_sf[ii] - pv = pvfcj(uvw,p,xy,xs, - shkl,eta_mixing, - t,d,g, - tth_list, - HL,SL,xn,wn) + pv = pvfcj( + uvw, p, xy, xs, shkl, eta_mixing, t, d, g, tth_list, HL, SL, xn, wn + ) spec += II * pv return spec + 
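
For orientation while reading the spectrum builders that follow: the expressions in _mixing_factor_pv and pvoight_wppf above reduce to a compact recipe, sketched here in plain NumPy. The helper names (tch_mix, pseudo_voigt) and the toy peak list are illustrative assumptions, not hexrd API; the polynomial coefficients and unit-area normalizations are the ones that appear in this file.

import numpy as np

SIGMA_TO_FWHM = 2.0 * np.sqrt(2.0 * np.log(2.0))  # Gaussian FWHM = 2.3548 * sigma


def tch_mix(fwhm_g, fwhm_l):
    # Thompson-Cox-Hastings effective FWHM and Lorentz fraction eta,
    # same polynomial and coefficients as _mixing_factor_pv
    f = (fwhm_g**5
         + 2.69269 * fwhm_g**4 * fwhm_l
         + 2.42843 * fwhm_g**3 * fwhm_l**2
         + 4.47163 * fwhm_g**2 * fwhm_l**3
         + 0.07842 * fwhm_g * fwhm_l**4
         + fwhm_l**5) ** 0.2
    r = fwhm_l / f
    eta = 1.36603 * r - 0.47719 * r**2 + 0.11116 * r**3
    return min(max(eta, 0.0), 1.0), f


def pseudo_voigt(tth0, fwhm_g, fwhm_l, tth):
    # unit-area pseudo-Voigt: eta * Lorentzian + (1 - eta) * Gaussian;
    # 1/(sigma*sqrt(2*pi)) is the 0.9394372787/fwhm factor written out,
    # and gamma/pi/(...) is the Al = 1/pi normalized Lorentzian
    eta, f = tch_mix(fwhm_g, fwhm_l)
    sigma = f / SIGMA_TO_FWHM
    gamma = 0.5 * f
    g = np.exp(-((tth - tth0) ** 2) / (2.0 * sigma**2)) \
        / (sigma * np.sqrt(2.0 * np.pi))
    l = (gamma / np.pi) / ((tth - tth0) ** 2 + gamma**2)
    return eta * l + (1.0 - eta) * g


# a two-peak toy spectrum, mirroring the spec += Iobs[ii] * pv accumulation
# in the computespectrum_* loops; (intensity, 2-theta) pairs are made up
tth = np.linspace(8.0, 14.0, 4001)
spec = np.zeros_like(tth)
for I0, t0 in [(100.0, 10.0), (40.0, 12.5)]:
    spec += I0 * pseudo_voigt(t0, 0.05, 0.03, tth)
print(np.trapz(spec, tth))  # ~140: each profile integrates to ~1
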
@njit(cache=True, nogil=True, parallel=True) -def computespectrum_pvtch(uvw, - p, - xy, - xy_sf, - shkl, - eta_mixing, - tth, - dsp, - hkl, - tth_list, - Iobs): +def computespectrum_pvtch( + uvw, p, xy, xy_sf, shkl, eta_mixing, tth, dsp, hkl, tth_list, Iobs +): """ @author Saransh Singh, Lawrence Livermore National Lab @date 03/31/2021 SS 1.0 original @@ -554,9 +534,9 @@ def computespectrum_pvtch(uvw, """ spec = np.zeros(tth_list.shape) - nref = np.min(np.array([Iobs.shape[0], - tth.shape[0], - dsp.shape[0],hkl.shape[0]])) + nref = np.min( + np.array([Iobs.shape[0], tth.shape[0], dsp.shape[0], hkl.shape[0]]) + ) for ii in prange(nref): II = Iobs[ii] @@ -565,28 +545,28 @@ def computespectrum_pvtch(uvw, g = hkl[ii] xs = xy_sf[ii] - pv = pvoight_wppf(uvw,p,xy, - xs,shkl,eta_mixing, - t,d,g, - tth_list) + pv = pvoight_wppf(uvw, p, xy, xs, shkl, eta_mixing, t, d, g, tth_list) spec += II * pv return spec + @njit(cache=True, nogil=True, parallel=True) -def computespectrum_pvpink(alpha, - beta, - uvw, - p, - xy, - xy_sf, - shkl, - eta_mixing, - tth, - dsp, - hkl, - tth_list, - Iobs): +def computespectrum_pvpink( + alpha, + beta, + uvw, + p, + xy, + xy_sf, + shkl, + eta_mixing, + tth, + dsp, + hkl, + tth_list, + Iobs, +): """ @author Saransh Singh, Lawrence Livermore National Lab @date 03/31/2021 SS 1.0 original @@ -597,9 +577,9 @@ def computespectrum_pvpink(alpha, """ spec = np.zeros(tth_list.shape) - nref = np.min(np.array([Iobs.shape[0], - tth.shape[0], - dsp.shape[0],hkl.shape[0]])) + nref = np.min( + np.array([Iobs.shape[0], tth.shape[0], dsp.shape[0], hkl.shape[0]]) + ) for ii in prange(nref): II = Iobs[ii] @@ -608,33 +588,34 @@ def computespectrum_pvpink(alpha, g = hkl[ii] xs = xy_sf[ii] - pv = pvoight_pink_beam(alpha,beta, - uvw,p,xy, - xs,shkl,eta_mixing, - t,d,g, - tth_list) + pv = pvoight_pink_beam( + alpha, beta, uvw, p, xy, xs, shkl, eta_mixing, t, d, g, tth_list + ) spec += II * pv return spec + @njit(cache=True, nogil=True) -def calc_Iobs_pvfcj(uvw, - p, - xy, - xy_sf, - shkl, - eta_mixing, - HL, - SL, - xn, - wn, - tth, - dsp, - hkl, - tth_list, - Icalc, - spectrum_expt, - spectrum_sim): +def calc_Iobs_pvfcj( + uvw, + p, + xy, + xy_sf, + shkl, + eta_mixing, + HL, + SL, + xn, + wn, + tth, + dsp, + hkl, + tth_list, + Icalc, + spectrum_expt, + spectrum_sim, +): """ @author Saransh Singh, Lawrence Livermore National Lab @date 03/31/2021 SS 1.0 original @@ -644,16 +625,16 @@ def calc_Iobs_pvfcj(uvw, the final intensities """ Iobs = np.zeros(tth.shape) - nref = np.min(np.array([Icalc.shape[0], - tth.shape[0], - dsp.shape[0],hkl.shape[0]])) + nref = np.min( + np.array([Icalc.shape[0], tth.shape[0], dsp.shape[0], hkl.shape[0]]) + ) - yo = spectrum_expt[:,1] - yc = spectrum_sim[:,1] - mask = yc != 0. 
+ yo = spectrum_expt[:, 1] + yc = spectrum_sim[:, 1] + mask = yc != 0.0 yo = yo[mask] yc = yc[mask] - tth_list_mask = spectrum_expt[:,0] + tth_list_mask = spectrum_expt[:, 0] tth_list_mask = tth_list_mask[mask] for ii in np.arange(nref): @@ -663,33 +644,47 @@ def calc_Iobs_pvfcj(uvw, g = hkl[ii] xs = xy_sf[ii] - pv = pvfcj(uvw,p,xy,xs, - shkl,eta_mixing, - t,d,g, - tth_list_mask, - HL,SL,xn,wn) + pv = pvfcj( + uvw, + p, + xy, + xs, + shkl, + eta_mixing, + t, + d, + g, + tth_list_mask, + HL, + SL, + xn, + wn, + ) y = Ic * pv y = y[mask] - Iobs[ii] = np.trapz(yo*y/yc, tth_list_mask) + Iobs[ii] = np.trapz(yo * y / yc, tth_list_mask) return Iobs + @njit(cache=True, nogil=True) -def calc_Iobs_pvtch(uvw, - p, - xy, - xy_sf, - shkl, - eta_mixing, - tth, - dsp, - hkl, - tth_list, - Icalc, - spectrum_expt, - spectrum_sim): +def calc_Iobs_pvtch( + uvw, + p, + xy, + xy_sf, + shkl, + eta_mixing, + tth, + dsp, + hkl, + tth_list, + Icalc, + spectrum_expt, + spectrum_sim, +): """ @author Saransh Singh, Lawrence Livermore National Lab @date 03/31/2021 SS 1.0 original @@ -699,16 +694,16 @@ def calc_Iobs_pvtch(uvw, the final intensities """ Iobs = np.zeros(tth.shape) - nref = np.min(np.array([Icalc.shape[0], - tth.shape[0], - dsp.shape[0],hkl.shape[0]])) + nref = np.min( + np.array([Icalc.shape[0], tth.shape[0], dsp.shape[0], hkl.shape[0]]) + ) - yo = spectrum_expt[:,1] - yc = spectrum_sim[:,1] - mask = yc != 0. + yo = spectrum_expt[:, 1] + yc = spectrum_sim[:, 1] + mask = yc != 0.0 yo = yo[mask] yc = yc[mask] - tth_list_mask = spectrum_expt[:,0] + tth_list_mask = spectrum_expt[:, 0] tth_list_mask = tth_list_mask[mask] for ii in np.arange(nref): @@ -718,34 +713,36 @@ def calc_Iobs_pvtch(uvw, g = hkl[ii] xs = xy_sf[ii] - pv = pvoight_wppf(uvw,p,xy, - xs,shkl,eta_mixing, - t,d,g, - tth_list_mask) + pv = pvoight_wppf( + uvw, p, xy, xs, shkl, eta_mixing, t, d, g, tth_list_mask + ) y = Ic * pv y = y[mask] - Iobs[ii] = np.trapz(yo*y/yc, tth_list_mask) + Iobs[ii] = np.trapz(yo * y / yc, tth_list_mask) return Iobs + @njit(cache=True, nogil=True) -def calc_Iobs_pvpink(alpha, - beta, - uvw, - p, - xy, - xy_sf, - shkl, - eta_mixing, - tth, - dsp, - hkl, - tth_list, - Icalc, - spectrum_expt, - spectrum_sim): +def calc_Iobs_pvpink( + alpha, + beta, + uvw, + p, + xy, + xy_sf, + shkl, + eta_mixing, + tth, + dsp, + hkl, + tth_list, + Icalc, + spectrum_expt, + spectrum_sim, +): """ @author Saransh Singh, Lawrence Livermore National Lab @date 03/31/2021 SS 1.0 original @@ -755,16 +752,16 @@ def calc_Iobs_pvpink(alpha, the final intensities """ Iobs = np.zeros(tth.shape) - nref = np.min(np.array([Icalc.shape[0], - tth.shape[0], - dsp.shape[0],hkl.shape[0]])) + nref = np.min( + np.array([Icalc.shape[0], tth.shape[0], dsp.shape[0], hkl.shape[0]]) + ) - yo = spectrum_expt[:,1] - yc = spectrum_sim[:,1] - mask = yc != 0. 
+ yo = spectrum_expt[:, 1] + yc = spectrum_sim[:, 1] + mask = yc != 0.0 yo = yo[mask] yc = yc[mask] - tth_list_mask = spectrum_expt[:,0] + tth_list_mask = spectrum_expt[:, 0] tth_list_mask = tth_list_mask[mask] for ii in prange(nref): @@ -774,24 +771,31 @@ def calc_Iobs_pvpink(alpha, g = hkl[ii] xs = xy_sf[ii] - pv = pvoight_pink_beam(alpha, beta, - uvw,p,xy,xs, - shkl,eta_mixing, - t,d,g, - tth_list_mask) + pv = pvoight_pink_beam( + alpha, + beta, + uvw, + p, + xy, + xs, + shkl, + eta_mixing, + t, + d, + g, + tth_list_mask, + ) y = Ic * pv y = y[mask] - Iobs[ii] = np.trapz(yo*y/yc, tth_list_mask) + Iobs[ii] = np.trapz(yo * y / yc, tth_list_mask) return Iobs + @njit(cache=True, nogil=True) -def calc_rwp(spectrum_sim, - spectrum_expt, - weights, - P): +def calc_rwp(spectrum_sim, spectrum_expt, weights, P): """ @author Saransh Singh, Lawrence Livermore National Lab @date 03/31/2021 SS 1.0 original @@ -799,9 +803,9 @@ def calc_rwp(spectrum_sim, moved outside of the class to allow numba implementation P : number of independent parameters in fitting """ - err = weights[:,1]*(spectrum_sim[:,1] - spectrum_expt[:,1])**2 + err = weights[:, 1] * (spectrum_sim[:, 1] - spectrum_expt[:, 1]) ** 2 - weighted_expt = weights[:,1] * spectrum_expt[:,1] **2 + weighted_expt = weights[:, 1] * spectrum_expt[:, 1] ** 2 errvec = np.sqrt(err) @@ -810,9 +814,9 @@ def calc_rwp(spectrum_sim, den = np.sum(weighted_expt) """ standard Rwp i.e. weighted residual """ - if(den > 0.): - if(wss/den > 0.): - Rwp = np.sqrt(wss/den) + if den > 0.0: + if wss / den > 0.0: + Rwp = np.sqrt(wss / den) else: Rwp = np.inf else: @@ -821,17 +825,17 @@ def calc_rwp(spectrum_sim, """ number of observations to fit i.e. number of data points """ N = spectrum_sim.shape[0] - if den > 0.: - if (N-P)/den > 0: - Rexp = np.sqrt((N-P)/den) + if den > 0.0: + if (N - P) / den > 0: + Rexp = np.sqrt((N - P) / den) else: Rexp = 0.0 else: Rexp = np.inf # Rwp and goodness of fit parameters - if Rexp > 0.: - gofF = Rwp/Rexp + if Rexp > 0.0: + gofF = Rwp / Rexp else: gofF = np.inf
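
As a closing cross-check of the figure-of-merit algebra in calc_rwp, here is a self-contained NumPy sketch. It uses a 1-D weights vector rather than the two-column (x, w) arrays above, omits the divide-by-zero guards, and rwp_gof is an illustrative name, not hexrd API. With inverse-variance weights, a statistically good fit gives a goodness of fit near one, since the weighted sum of squares approaches N while Rexp is built from N - P.

import numpy as np


def rwp_gof(y_sim, y_expt, weights, n_params):
    # weighted-profile residual Rwp, expected Rexp, and goodness of fit,
    # following the expressions in calc_rwp
    wss = np.sum(weights * (y_sim - y_expt) ** 2)
    den = np.sum(weights * y_expt**2)
    rwp = np.sqrt(wss / den)
    rexp = np.sqrt((len(y_expt) - n_params) / den)
    return rwp, rexp, rwp / rexp


rng = np.random.default_rng(0)
y_true = rng.uniform(10.0, 100.0, 500)              # toy noise-free pattern
y_obs = y_true + rng.normal(scale=np.sqrt(y_true))  # Poisson-like noise
w = 1.0 / y_true                                    # inverse-variance weights
print(rwp_gof(y_true, y_obs, w, n_params=10))       # GoF close to 1.0
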