Cast wres as complex before rephasing #980

Merged · merged 2 commits · Oct 24, 2024

Changes from all commits

98 changes: 51 additions & 47 deletions hera_cal/frf.py

@@ -2000,16 +2000,17 @@ def time_average_argparser():

    return ap


def get_frop_for_noise(times, filter_cent_use, filter_half_wid_use, freqs,
                       t_avg=300., eigenval_cutoff=1e-9, weights=None,
                       coherent_avg=True, wgt_tavg_by_nsample=True,
                       nsamples=None, rephase=True, bl_vec=None,
                       lat=-30.721526120689507, dlst=None):
"""
Get FRF + coherent time average operator for the purposes of noise
Get FRF + coherent time average operator for the purposes of noise
covariance calculation. Composes time averaging and the main lobe FRF into
a single operation.

    Parameters:
        times (array):
            (Interleaved) Julian Dates on hd, converted to seconds.
@@ -2029,7 +2030,7 @@ def get_frop_for_noise(times, filter_cent_use, filter_half_wid_use, freqs,
        coherent_avg (bool):
            Whether to coherently average on the t_avg cadence or not.
        wgt_tavg_by_nsample (bool):
            Whether to weight the time average by nsamples.
            Otherwise do uniform weighting.
        nsamples (array):
            Array proportional to how many nights contributed to each sample.
@@ -2042,43 +2043,43 @@ def get_frop_for_noise(times, filter_cent_use, filter_half_wid_use, freqs,
        dlst (float or array_like):
            Amount of LST to rephase by, in radians. If float, rephases all
            times by the same amount.

    Returns:
        frop (array):
            Filter operator. Shape (Ntimes_coarse, Ntimes_fine, Nfreqs).
    """

    # Time handling is a slightly modified port from hera_filters/hera_cal.

    Ntimes = len(times)
    Nfreqs = len(freqs)

    dmatr, _ = dspec.dpss_operator(times, np.array([filter_cent_use, ]),
                                   np.array([filter_half_wid_use, ]),
                                   eigenval_cutoff=np.array([eigenval_cutoff, ]))
    Nmodes = dmatr.shape[-1]

    if weights is None:
        weights = np.ones([Ntimes, Nfreqs])
    elif weights.shape != (Ntimes, Nfreqs):
        raise ValueError(f"weights has wrong shape {weights.shape} "
                         f"compared to (Ntimes, Nfreqs) = {(Ntimes, Nfreqs)}."
                         " May need to be sliced along an axis.")


    # Index Legend:
    #   a = DPSS mode
    #   f = frequency
    #   t = fine time
    #   T = coarse time

    ddagW = dmatr.T.conj()[:, np.newaxis] * weights.T  # aft
    ddagWd = ddagW @ dmatr  # afa
    lsq = np.linalg.solve(ddagWd.swapaxes(0, 1), ddagW.swapaxes(0, 1))  # fat
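    # The solve above gives, per frequency f, the weighted least-squares
    # fitting operator lsq[f] = (D^H W_f D)^{-1} D^H W_f, so that
    # lsq[f] @ data[:, f] yields the best-fit DPSS mode amplitudes.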

    if coherent_avg:
        dtime = np.median(np.abs(np.diff(times)))
        chunk_size = int(np.round((t_avg / dtime)))
        Nchunk = int(np.ceil(Ntimes / chunk_size))
        chunk_remainder = Ntimes % chunk_size

@@ -2087,33 +2088,33 @@ def get_frop_for_noise(times, filter_cent_use, filter_half_wid_use, freqs,
            raise ValueError(f"nsamples has wrong shape {nsamples.shape} "
                             f"compared to (Ntimes, Nfreqs) = {(Ntimes, Nfreqs)}."
                             " May need to be sliced along an axis.")
        if chunk_remainder > 0:
            # Stack some zeros that get zero weight so we can do the reshaping
            # below without incident.
            dmatr_stack_shape = [chunk_size - chunk_remainder, Nmodes]
            weights_stack_shape = [chunk_size - chunk_remainder, Nfreqs]
            dmatr = np.vstack((dmatr, np.zeros(dmatr_stack_shape, dtype=complex)))
            tavg_weights = np.vstack((tavg_weights, np.zeros(weights_stack_shape, dtype=complex)))
            dlst = np.append(dlst, np.zeros(chunk_size - chunk_remainder, dtype=float))

        dres = dmatr.reshape(Nchunk, chunk_size, Nmodes)
        wres = tavg_weights.reshape(Nchunk, chunk_size, Nfreqs).astype(complex)

[Review comment, Member Author] This is the only substantive change.
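
Why the cast matters: a minimal sketch, not from the PR, assuming the
rephasing reduces to an in-place multiply by a complex phase (NumPy refuses
to cast the complex result back into a float64 array):

import numpy as np

# Illustrative only, not part of frf.py.
wres = np.ones((4, 1, 8))                      # float64, as reshape() returns
phs = np.exp(2j * np.pi * 0.1 * np.arange(8))  # complex rephasing factor
try:
    wres *= phs                                # complex128 -> float64 in place
except TypeError as err:
    print(err)                                 # cannot cast ufunc 'multiply' output
wres = wres.astype(complex)                    # the fix in this PR
wres *= phs                                    # now succeeds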

        wnorm = wres.sum(axis=1)[:, np.newaxis]
        # Normalize for an average; zero-weight chunks map to 0 rather than dividing by 0.
        wres = np.where(wnorm > 0, wres / wnorm, 0)

        # Apply the rephasing to the weights matrix, since that is mathematically
        # equivalent to rephasing the data and more convenient here.
        if rephase:
            wres = utils.lst_rephase(wres.reshape(Nchunk * chunk_size, 1, Nfreqs),
                                     bl_vec, freqs, dlst, lat=lat, inplace=False)
            wres = wres.reshape(Nchunk, chunk_size, Nfreqs)
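            # Equivalence: the phase factor is diagonal in (time, freq), so
            # sum_t w_t * (phi_t * d_t) = sum_t (w_t * phi_t) * d_t; folding
            # phi into the weights leaves the weighted average unchanged.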

        # does "Ttf,Tta->Tfa" much faster than einsum and fancy indexing
        dchunk = np.zeros([Nchunk, Nfreqs, Nmodes], dtype=complex)
        for coarse_time_ind in range(Nchunk):
            dchunk[coarse_time_ind] = np.tensordot(wres[coarse_time_ind].T,
                                                   dres[coarse_time_ind],
                                                   axes=1)
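        # Equivalent (but slower) one-liner:
        # dchunk = np.einsum('Ttf,Tta->Tfa', wres, dres)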

        # does "Tfa,fat->Ttf" faster than einsum and fancy indexing
        frop = np.zeros([Nchunk, Ntimes, Nfreqs], dtype=complex)
        for freq_ind in range(Nfreqs):
@@ -2123,31 +2124,32 @@ def get_frop_for_noise(times, filter_cent_use, filter_half_wid_use, freqs,
    else:
        # ta,fat->ttf
        frop = np.tensordot(dmatr, lsq.transpose(1, 2, 0), axes=1)

    return frop
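
For orientation, an illustrative sketch (not from the PR) of how an operator
with the returned shape acts on data: it contracts the fine-time axis
independently at each frequency.

import numpy as np

# Hypothetical shapes, for illustration only.
Ncoarse, Nfine, Nfreqs = 10, 60, 128
frop = np.zeros((Ncoarse, Nfine, Nfreqs), dtype=complex)  # as returned by get_frop_for_noise
data = np.ones((Nfine, Nfreqs), dtype=complex)            # (fine time, freq)
filtered = np.einsum('Ttf,tf->Tf', frop, data)            # (coarse time, freq)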


def prep_var_for_frop(data, nsamples, weights, cross_antpairpol, freq_slice,
                      auto_ant=None, default_value=0., verbose=False):
"""
Wrapper around hera_cal.noise.predict_noise_variance_from_autos that preps
the noise variance calculation for FRF + coherent average.

Parameters:
data: DataContainer
Must contain the cross_antpairpol in question as well as some
Must contain the cross_antpairpol in question as well as some
corresponding autos.
nsamples: DataContainer
Contains the nsamples associated with the cross_antpairpol in question.
weights: DataContainer
Contains the weights associated with the cross_antpairpol in question.
cross_antpairpol: tuple
Tuple whose first two entries are antennas and last entry is a
polarization.
polarization.
freq_slice: slice
A slice into the frequency axis of the data that all gets filtered
identically (a PS analysis band).
auto_ant: int
If not None, should be a single integer specifying a single
If not None, should be a single integer specifying a single
antenna's autos (this is used because the redundantly averaged data
have a single auto file for all antennas).
default_value: (float)
@@ -2170,13 +2172,14 @@ def prep_var_for_frop(data, nsamples, weights, cross_antpairpol, freq_slice,
    all_nonfinite_zero = np.all(weights[cross_antpairpol][:, freq_slice][var_isnotfinite] == 0)
    if not all_nonfinite_zero:
        warnings.warn("Not all nonfinite variance locations are of zero weight!")

    if verbose:
        print(f"Replacing nonfinite variances with default value: {default_value}")
    var[var_isnotfinite] = default_value

    return var


def get_FRF_cov(frop, var):
    """
    Get noise covariance from noise variance and given FRF + coherent average operator.
@@ -2198,9 +2201,10 @@ def get_FRF_cov(frop, var):
    for freq_ind in range(Nfreqs):
        cov[freq_ind] = np.tensordot((frop[:, :, freq_ind] * var[:, freq_ind]),
                                     frop[:, :, freq_ind].T.conj(), axes=1)

    return cov
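
In matrix form the loop above computes, per frequency f,
cov_f = F_f diag(var_f) F_f^H. An equivalent single einsum, shown only as an
illustrative cross-check (not part of frf.py):

import numpy as np

# Hypothetical small shapes for the cross-check.
Ncoarse, Nfine, Nfreqs = 4, 12, 3
frop = np.full((Ncoarse, Nfine, Nfreqs), 0.5 + 0.1j)
var = np.ones((Nfine, Nfreqs))
cov = np.einsum('Ttf,tf,Stf->fTS', frop, var, frop.conj())  # (Nfreqs, Ncoarse, Ncoarse)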


def get_corr(cov):
    """
    Get correlation matrices from a sequence of covariance matrices.
@@ -2215,15 +2219,16 @@ def get_corr(cov):
    Ntimes = cov.shape[1]
    diags = cov[:, np.arange(Ntimes), np.arange(Ntimes)]
    corr = cov / np.sqrt(diags[:, None] * diags[:, :, None])
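    # i.e. corr[f, i, j] = cov[f, i, j] / sqrt(cov[f, i, i] * cov[f, j, j])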

    return corr


def get_correction_factor_from_cov(cov, tslc=None):
    """
    Get a correction factor for PS noise variance prediction in the absence
    of propagating the noise covariances all the way to delay space. This is
    based on HERA memo #132, where it is shown that one may calculate an
    effective number of degrees of freedom based on the variance of a
    generalized chi-square random variable.

    Parameters:
@@ -2238,14 +2243,13 @@ def get_correction_factor_from_cov(cov, tslc=None):
            The correction factor.
    """
    corr = get_corr(cov)

    if tslc is None:
        Ncotimes = corr.shape[1]
        Neff = Ncotimes**2 / np.sum(np.abs(corr)**2, axis=(1, 2))
    else:
        Ncotimes = tslc.stop - tslc.start
        Neff = Ncotimes**2 / np.sum(np.abs(corr[:, tslc, tslc])**2, axis=(1, 2))
    correction_factor = np.mean(Ncotimes / Neff)  # Average over frequency since they are independent.

    return correction_factor
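
A quick sanity check (illustrative, not from the PR): with no time-time
correlation, corr is the identity at each frequency, so Neff equals Ncotimes
and the correction factor is exactly 1.

import numpy as np

# Illustrative check, not part of frf.py.
Nfreqs, Ncotimes = 3, 8
corr = np.broadcast_to(np.eye(Ncotimes), (Nfreqs, Ncotimes, Ncotimes))
Neff = Ncotimes**2 / np.sum(np.abs(corr)**2, axis=(1, 2))  # == Ncotimes
print(np.mean(Ncotimes / Neff))                            # -> 1.0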