add option to calc emission delay #43

Merged: 5 commits merged on Sep 6, 2024
38 changes: 19 additions & 19 deletions reduction/data/reference_r201284_quick.txt
@@ -1,19 +1,19 @@
1.163378023199675496e-02 1.325593461531626494e+00 1.120250529342311230e-01
1.186645583663669019e-02 1.294680326566702400e+00 9.672069214763454048e-02
1.210378495336942445e-02 1.238274370485321851e+00 8.648896912330114595e-02
1.234586065243681308e-02 1.159832519891675240e+00 7.963484459851631614e-02
1.259277786548554899e-02 1.222633486758726962e+00 7.762562572638591341e-02
1.284463342279526001e-02 1.167566768825293311e+00 7.093000722979009298e-02
1.310152609125116510e-02 1.089555666823608826e+00 6.376147192504062755e-02
1.336355661307618917e-02 1.260180565406185282e+00 6.623896170383887505e-02
1.363082774533771330e-02 1.143730324205591620e+00 6.069157936496669126e-02
1.390344430024446735e-02 1.215330667375502882e+00 5.874977583562599609e-02
1.418151318624935597e-02 1.043005601021525397e+00 5.163880176123093052e-02
1.446514344997434399e-02 6.674822340491028960e-01 3.899473538181162657e-02
1.475444631897383091e-02 5.447085246388169155e-01 3.424081148881444325e-02
1.504953524535330989e-02 5.274565456656169493e-01 3.169419766307193798e-02
1.535052595026037414e-02 5.438251674919202250e-01 3.064351649632914704e-02
1.565753646926558440e-02 5.346647357332153794e-01 2.962219727241299089e-02
1.597068719865089512e-02 5.261059946437156576e-01 2.724290546651234740e-02
1.629010094262391128e-02 4.684491924845674560e-01 2.486305768472303612e-02
1.661590296147639340e-02 4.623489276791477032e-01 2.350251761258563554e-02
1.163378023199675496e-02 1.333231452768262271e+00 1.120325216433914889e-01
1.186645583663669019e-02 1.272776318767768089e+00 9.704521675696667349e-02
1.210378495336942445e-02 1.229402657601808224e+00 8.665255869866364535e-02
1.234586065243681308e-02 1.165307025507555494e+00 7.942147719880700285e-02
1.259277786548554899e-02 1.229105460998366484e+00 7.754071561638081755e-02
1.284463342279526001e-02 1.170930115369047453e+00 7.092123081092836789e-02
1.310152609125116510e-02 1.084928715184935166e+00 6.408731424751080985e-02
1.336355661307618917e-02 1.261171329706206601e+00 6.623254129901129383e-02
1.363082774533771330e-02 1.124471278305815725e+00 6.069356257827169393e-02
1.390344430024446735e-02 1.219348911006870706e+00 5.883114629558970632e-02
1.418151318624935597e-02 1.036269517915203942e+00 5.177554777259834456e-02
1.446514344997434399e-02 6.633964854841288838e-01 3.911897643624936277e-02
1.475444631897383091e-02 5.473469522311991131e-01 3.419214516579200530e-02
1.504953524535330989e-02 5.244791188922898195e-01 3.172859091826243233e-02
1.535052595026037414e-02 5.339733198163237882e-01 3.079313553587632174e-02
1.565753646926558440e-02 5.311423897407376860e-01 2.954501815311255664e-02
1.597068719865089512e-02 5.208062067105466708e-01 2.730162033661712520e-02
1.629010094262391128e-02 4.660024888970181189e-01 2.484147923614480821e-02
1.661590296147639340e-02 4.632706508823502545e-01 2.354937144261598850e-02
63 changes: 51 additions & 12 deletions reduction/lr_reduction/event_reduction.py
@@ -193,7 +193,7 @@ def __init__(self, scattering_workspace, direct_workspace,
tof_range=None, theta=1.0, instrument=None,
functional_background=False, dead_time=False,
paralyzable=True, dead_time_value=4.2,
dead_time_tof_step=100):
dead_time_tof_step=100, use_emission_time=True):
"""
Pixel ranges include the min and max pixels.

@@ -215,6 +215,7 @@ def __init__(self, scattering_workspace, direct_workspace,
:param paralyzable: if True, the dead time calculation will use the paralyzable approach
:param dead_time_value: value of the dead time in microsecond
:param dead_time_tof_step: TOF bin size in microsecond
:param use_emission_time: if True, the emission time delay will be computed
"""
if instrument in [self.INSTRUMENT_4A, self.INSTRUMENT_4B]:
self.instrument = instrument
@@ -239,6 +240,7 @@ def __init__(self, scattering_workspace, direct_workspace,
self.paralyzable = paralyzable
self.dead_time_value = dead_time_value
self.dead_time_tof_step = dead_time_tof_step
self.use_emission_time = use_emission_time

# Turn on functional background estimation
self.use_functional_bck = functional_background
@@ -265,11 +267,12 @@ def extract_meta_data(self):
"""
Extract meta data from the data file.
"""
# Set up basic data
self.n_x = int(self._ws_sc.getInstrument().getNumberParameter("number-of-x-pixels")[0])
self.n_y = int(self._ws_sc.getInstrument().getNumberParameter("number-of-y-pixels")[0])
# Get instrument parameters
settings = read_settings(self._ws_sc)

self.pixel_width = float(self._ws_sc.getInstrument().getNumberParameter("pixel-width")[0]) / 1000.0
self.n_x = settings["number-of-x-pixels"]
self.n_y = settings["number-of-y-pixels"]
self.pixel_width = settings["pixel-width"] / 1000.0

if self.instrument == self.INSTRUMENT_4B:
self.extract_meta_data_4B()
@@ -326,17 +329,32 @@ def extract_meta_data_4B(self):
else:
self.det_distance = self.DEFAULT_4B_SAMPLE_DET_DISTANCE

if "source-det-distance" in settings:
# Check that we have the needed meta data for the emission delay calculation
if self.use_emission_time:
moderator_available = "BL4B:Chop:Skf2:ChopperModerator" in self._ws_sc.getRun()
if not moderator_available:
print("Moderator information unavailable: skipping emission time calculation")
self.use_emission_time = False

if self.use_emission_time:
# Read the true distance from the data file. We will compute an emission time delay later
self.source_detector_distance = self._ws_sc.getRun().getProperty("BL4B:Det:TH:DlyDet:BasePath").value[0]
elif "source-det-distance" in settings:
# Use an effective source-detector distance that accounts for the average emission time delay
self.source_detector_distance = settings["source-det-distance"]
else:
# Use the nominal/default source-detector distance
self.source_detector_distance = self.DEFAULT_4B_SOURCE_DET_DISTANCE
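
Reviewer note: the selection above boils down to a three-way fallback. A hedged sketch of that logic (the helper name is illustrative; `run` stands for the Mantid run object and `settings` for the dict returned by `read_settings`):

```python
# Sketch only: illustrative helper mirroring the fallback in extract_meta_data_4B
def select_source_detector_distance(run, settings, use_emission_time, default_distance):
    if use_emission_time and "BL4B:Chop:Skf2:ChopperModerator" in run:
        # True flight path from the logs; the per-event emission delay is applied later
        return run.getProperty("BL4B:Det:TH:DlyDet:BasePath").value[0]
    if "source-det-distance" in settings:
        # Effective distance that folds in the average emission delay
        return settings["source-det-distance"]
    # Nominal/default source-detector distance
    return default_distance
```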

def __repr__(self):
output = "sample-det: %s\n" % self.det_distance
output += "pixel: %s\n" % self.pixel_width
output += "WL: %s %s\n" % (self.wl_range[0], self.wl_range[1])
output += "Q: %s %s\n" % (self.q_min, self.q_max)
output += "Theta = %s" % self.theta
output = "Reduction settings:\n"
output += " sample-det: %s\n" % self.det_distance
output += " source-det: %s\n" % self.source_detector_distance
output += " pixel: %s\n" % self.pixel_width
output += " WL: %s %s\n" % (self.wl_range[0], self.wl_range[1])
output += " Q: %s %s\n" % (self.q_min, self.q_max)
output += " Theta = %s\n" % self.theta
output += " Emission delay = %s" % self.use_emission_time
return output

def to_dict(self):
@@ -581,7 +599,10 @@ def _reflectivity(self, ws, peak_position, peak, low_res, theta, q_bins=None, q_
if evt_list.getNumberEvents() == 0:
continue

wl_list = evt_list.getTofs() / self.constant
tofs = evt_list.getTofs()
if self.use_emission_time:
tofs = self.emission_time_correction(ws, tofs)
wl_list = tofs / self.constant

# Gravity correction
d_theta = self.gravity_correction(ws, wl_list)
@@ -762,6 +783,24 @@ def _off_specular(self, ws, wl_dist, wl_bins, x_bins, z_bins, peak_position, the

return refl, d_refl_sq

def emission_time_correction(self, ws, tofs):
"""
Correct the TOF for the emission time delay in the moderator
:param ws: Mantid workspace
:param tofs: list of TOF values
"""
mt_run = ws.getRun()
use_emission_delay = False
if "BL4B:Chop:Skf2:ChopperModerator" in mt_run:
moderator_calc = mt_run.getProperty("BL4B:Chop:Skf2:ChopperModerator").value[0]
t_mult = mt_run.getProperty("BL4B:Chop:Skf2:ChopperMultiplier").value[0]
t_off = mt_run.getProperty("BL4B:Chop:Skf2:ChopperOffset").value[0]
use_emission_delay = moderator_calc == 1

if use_emission_delay:
tofs -= t_off + t_mult * tofs / self.constant
return tofs
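
Reviewer note: the correction subtracts a wavelength-dependent delay from each event TOF, using the chopper/moderator constants stored in the run logs. A minimal standalone sketch with illustrative names (`self.constant` in the class is the TOF-to-wavelength conversion factor):

```python
import numpy as np

def correct_emission_time(tofs, t_off, t_mult, tof_to_wl_constant):
    """Subtract the moderator emission delay from each event TOF.

    The delay is modeled as linear in wavelength: t_off + t_mult * wl,
    with wl = tof / tof_to_wl_constant. Sketch only; names are illustrative.
    """
    tofs = np.asarray(tofs, dtype=float)
    return tofs - (t_off + t_mult * tofs / tof_to_wl_constant)
```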

def gravity_correction(self, ws, wl_list):
"""
Gravity correction for each event
30 changes: 22 additions & 8 deletions reduction/lr_reduction/peak_finding.py
@@ -6,10 +6,16 @@
warnings.filterwarnings('ignore')

import mantid.simpleapi as api
from lmfit.models import GaussianModel, LinearModel, QuadraticModel, RectangleModel
from lmfit.models import GaussianModel


def process_data(workspace, summed=True, tof_step=200):
r"""
Process a Mantid workspace to extract counts vs pixel.
:param workspace: Mantid workspace
:param summed: if True, the x pixels will be summed
:param tof_step: TOF bin size
"""
tof_min = workspace.getTofMin()
tof_max = workspace.getTofMax()
_ws = api.Rebin(InputWorkspace=workspace, Params="%s,%s,%s" % (tof_min, tof_step, tof_max))
@@ -19,7 +25,6 @@ def process_data(workspace, summed=True, tof_step=200):
tof=_ws.extractX()[0]
tof = (tof[:-1]+tof[1:])/2.0


if summed:
y = np.sum(y, axis=2)

Expand All @@ -28,11 +33,23 @@ def process_data(workspace, summed=True, tof_step=200):
return tof, _x, _y


def fit_signal_flat_bck(x, y, x_min=110, x_max=170, center=None, sigma=None):
def fit_signal_flat_bck(x, y, x_min=110, x_max=170, center=None, sigma=None,
background=None):
r"""
Fit a Gaussian peak.
:param x: list of x values
:param y: list of y values
:param x_min: start index of the list of points
:param x_max: end index of the list of points
:param center: estimated center position
:param sigma: if provided, the sigma will be fixed to the given value
:param background: if provided, the value will be subtracted from y
"""
gauss = GaussianModel(prefix='g_')
linear = LinearModel(prefix='l_')

amplitude_guess = np.max(y[x_min:x_max])
if background is None:
background = np.min(y[x_min:x_max])

_center = 140
_sigma = 2
@@ -42,7 +59,6 @@ def fit_signal_flat_bck(x, y, x_min=110, x_max=170, center=None, sigma=None):
_sigma = sigma

pars = gauss.make_params(amplitude=amplitude_guess, center=_center, sigma=_sigma)
pars.update(linear.make_params(a=0, b=0))

if sigma is not None:
pars['g_sigma'].vary=False
@@ -53,11 +69,9 @@ def fit_signal_flat_bck(x, y, x_min=110, x_max=170, center=None, sigma=None,
weights=1/np.sqrt(y)
weights[y<1]=1

model = gauss + linear
fit = model.fit(y[x_min:x_max], pars, method='leastsq',
fit = gauss.fit(y[x_min:x_max]-background, pars, method='leastsq',
x=x[x_min:x_max],
weights=1/weights[x_min:x_max])
# print(fit.fit_report())

fit.params['g_amplitude']
c=fit.params['g_center']
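
Reviewer note: taken together, the change to `fit_signal_flat_bck` replaces the Gaussian-plus-linear model with a single Gaussian fitted to background-subtracted counts. A hedged sketch of the new behaviour (function name and defaults are illustrative):

```python
import numpy as np
from lmfit.models import GaussianModel

def fit_gaussian_minus_background(x, y, x_min=110, x_max=170, background=None):
    # Flat background estimate if none is provided
    if background is None:
        background = np.min(y[x_min:x_max])
    gauss = GaussianModel(prefix='g_')
    pars = gauss.make_params(amplitude=np.max(y[x_min:x_max]), center=140, sigma=2)
    # Poisson-like weights, avoiding division by zero for empty bins
    weights = 1.0 / np.sqrt(np.clip(y, 1, None))
    fit = gauss.fit(y[x_min:x_max] - background, pars, x=x[x_min:x_max],
                    weights=weights[x_min:x_max], method='leastsq')
    return fit.params['g_center'].value, fit.params['g_sigma'].value
```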
11 changes: 11 additions & 0 deletions reduction/lr_reduction/reduction_template_reader.py
@@ -71,6 +71,9 @@ def __init__(self):
self.dead_time_value = 4.2
self.dead_time_tof_step = 100

# Calculate emission time delay instead of using an effective distance for all wavelengths
self.use_emission_time:bool = True

def from_dict(self, data_dict, permissible=True):
r"""
Update object's attributes with a dictionary with entries of the type attribute_name: attribute_value.
@@ -158,6 +161,9 @@ def to_xml(self):
_xml += "<dead_time_paralyzable>%s</dead_time_paralyzable>\n" % str(self.paralyzable)
_xml += "<dead_time_value>%s</dead_time_value>\n" % str(self.dead_time_value)
_xml += "<dead_time_tof_step>%s</dead_time_tof_step>\n" % str(self.dead_time_tof_step)

# Emission time correction
_xml += "<use_emission_time>%s</use_emission_time>\n" % str(self.use_emission_time)
_xml += "</RefLData>\n"

return _xml
@@ -269,6 +275,11 @@ def from_xml_element(self, instrument_dom):
self.dead_time_tof_step = getFloatElement(instrument_dom, "dead_time_tof_step",
default=self.dead_time_tof_step)

# Emission time
# Defaults to False for backward compatibility with templates where this option was not available.
self.use_emission_time = getBoolElement(instrument_dom, "use_emission_time",
default=False)


###### Utility functions to read XML content ########################
def getText(nodelist):
4 changes: 2 additions & 2 deletions reduction/lr_reduction/settings.json
@@ -16,7 +16,7 @@
},
{
"from": "2024-08-26",
"value": 1.362
"value": 1.355
}
],
"number-of-x-pixels": [
@@ -34,7 +34,7 @@
"pixel-width": [
{
"from":"2014-10-10",
"value": 0.7
"value": 0.70
}
],
"xi-reference": [
20 changes: 18 additions & 2 deletions reduction/lr_reduction/template.py
@@ -234,16 +234,31 @@ def process_from_template_ws(ws_sc, template_data, q_summing=False,
paralyzable=template_data.paralyzable,
dead_time_value=template_data.dead_time_value,
dead_time_tof_step=template_data.dead_time_tof_step,
use_emission_time=template_data.use_emission_time,
functional_background=template_data.two_backgrounds,
instrument=event_reduction.EventReflectivity.INSTRUMENT_4B)
print(event_refl)

# R(Q)
qz, refl, d_refl = event_refl.specular(q_summing=q_summing, tof_weighted=tof_weighted,
bck_in_q=bck_in_q, clean=clean, normalize=normalize)
qz_mid = (qz[:-1] + qz[1:])/2.0

print("Normalization options: %s %s" % (normalize, template_data.scaling_factor_flag))
if normalize and template_data.scaling_factor_flag:
# When using composite direct beam, we don't need a scaling
# factor file if the multiplier is in the logs
scaling_factor_PV = 'BL4B:CS:Autoreduce:ScaleMultiplier'
if normalize and scaling_factor_PV in ws_db.getRun():
# The standard changed during early implementation...
if int(template_data.norm_file) > 212005:
a = ws_db.getRun()[scaling_factor_PV].value[0]**2
else:
a = ws_db.getRun()[scaling_factor_PV].value[0]
print("Composite scaling factor: %s" % a)
d_refl /= a
refl /= a
b = err_a = err_b = 0
elif normalize and template_data.scaling_factor_flag:
print("Normalization options: %s %s" % (normalize, template_data.scaling_factor_flag))
# Get the scaling factors
a, b, err_a, err_b = scaling_factor(template_data.scaling_factor_file, ws_sc)

@@ -256,6 +271,7 @@
d_refl = np.sqrt(d_refl**2/a_q**2 + refl**2*d_a_q**2/a_q**4)
refl /= a_q
else:
print("Skipping scaling factor")
a = b = 1
err_a = err_b = 0

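
Reviewer note: the new normalization branch uses a scale multiplier stored in the direct-beam run logs when a composite direct beam is used. A sketch of just the factor selection (assumes `run` is the direct-beam Mantid run and `run_number` corresponds to `template_data.norm_file`; names are illustrative):

```python
# Sketch only: composite-direct-beam scaling factor, mirroring the branch above
def composite_scaling_factor(run, run_number,
                             pv="BL4B:CS:Autoreduce:ScaleMultiplier"):
    multiplier = run[pv].value[0]
    # The convention changed during early implementation: for runs after 212005
    # the stored multiplier must be squared before dividing it out of R(Q)
    return multiplier**2 if run_number > 212005 else multiplier
```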
11 changes: 11 additions & 0 deletions reduction/test/test_reduction_template_reader.py
@@ -13,6 +13,17 @@ def test_two_backgrounds(self):
redparms.two_backgrounds = True
assert "<two_backgrounds_flag>True</two_backgrounds_flag>" in redparms.to_xml()

def test_emission_delay(self):
r"""verify the xml dump writes the emission delay option"""
redparms = ReductionParameters()

# Default should be True
assert redparms.use_emission_time == True
assert "<use_emission_time>True</use_emission_time>" in redparms.to_xml()

redparms.use_emission_time = False
assert "<use_emission_time>False</use_emission_time>" in redparms.to_xml()

def test_from_dict(self):
r"""verify method from_dict raises when passed some nonsense"""
