From 4e2bf10841bd71cdda5a57afd84b4d7ba3187219 Mon Sep 17 00:00:00 2001
From: Josh Dillon
Date: Thu, 24 Oct 2024 14:27:20 -0700
Subject: [PATCH 1/2] cast wres as complex before rephasing

also, formatting fixes
---
 hera_cal/frf.py | 98 +++++++++++++++++++++++++------------------------
 1 file changed, 51 insertions(+), 47 deletions(-)

diff --git a/hera_cal/frf.py b/hera_cal/frf.py
index 38d1964c7..42154751c 100644
--- a/hera_cal/frf.py
+++ b/hera_cal/frf.py
@@ -2000,16 +2000,17 @@ def time_average_argparser():
     return ap
 
+
 def get_frop_for_noise(times, filter_cent_use, filter_half_wid_use, freqs,
-                       t_avg=300., eigenval_cutoff=1e-9, weights=None, 
-                       coherent_avg=True, wgt_tavg_by_nsample=True, 
-                       nsamples=None, rephase=True, bl_vec=None, 
+                       t_avg=300., eigenval_cutoff=1e-9, weights=None,
+                       coherent_avg=True, wgt_tavg_by_nsample=True,
+                       nsamples=None, rephase=True, bl_vec=None,
                        lat=-30.721526120689507, dlst=None):
     """
-    Get FRF + coherent time average operator for the purposes of noise 
+    Get FRF + coherent time average operator for the purposes of noise
     covariance calculation. Composes time averaging and the main lobe FRF
     into a single operation.
-    
+
     Parameters:
         times (array):
             (Interleaved) Julian Dates on hd, converted to seconds.
@@ -2029,7 +2030,7 @@ def get_frop_for_noise(times, filter_cent_use, filter_half_wid_use, freqs,
         coherent_avg (bool):
             Whether to coherently average on the t_avg cadence or not.
         wgt_tavg_by_nsample (bool):
-            Whether to weight the time average by nsamples. 
+            Whether to weight the time average by nsamples.
             Otherwise do uniform weighting.
         nsamples (array):
             Array proportional to how many nights contributed to each sample.
@@ -2042,43 +2043,43 @@ def get_frop_for_noise(times, filter_cent_use, filter_half_wid_use, freqs,
         dlst (float or array_like):
             Amount of LST to rephase by, in radians. If float, rephases all
             times by the same amount.
-    
+
     Returns:
         frop (array):
            Filter operator. Shape (Ntimes_coarse, Ntimes_fine, Nfreqs).
""" # Time handling is a slightly modified port from hera_filters/hera_cal - + Ntimes = len(times) Nfreqs = len(freqs) - + dmatr, _ = dspec.dpss_operator(times, np.array([filter_cent_use, ]), np.array([filter_half_wid_use, ]), eigenval_cutoff=np.array([eigenval_cutoff, ])) Nmodes = dmatr.shape[-1] - - if weights is None: + + if weights is None: weights = np.ones([Ntimes, Nfreqs]) elif weights.shape != (Ntimes, Nfreqs): - raise ValueError(f"weights has wrong shape {weights.shape} " - f"compared to (Ntimes, Nfreqs) = {(Ntimes, Nfreqs)}" - " May need to be sliced along an axis.") - - #####Index Legend##### + raise ValueError(f"weights has wrong shape {weights.shape} " + f"compared to (Ntimes, Nfreqs) = {(Ntimes, Nfreqs)}" + " May need to be sliced along an axis.") + + # Index Legend##### # a = DPSS mode # # f = frequency # # t = fine time # # T = coarse time # - #####Index Legend##### - - ddagW = dmatr.T.conj()[:, np.newaxis] * weights.T # aft - ddagWd = ddagW @ dmatr # afa - lsq = np.linalg.solve(ddagWd.swapaxes(0,1), ddagW.swapaxes(0,1)) # fat + # Index Legend##### + + ddagW = dmatr.T.conj()[:, np.newaxis] * weights.T # aft + ddagWd = ddagW @ dmatr # afa + lsq = np.linalg.solve(ddagWd.swapaxes(0, 1), ddagW.swapaxes(0, 1)) # fat if coherent_avg: dtime = np.median(np.abs(np.diff(times))) - chunk_size = int(np.round((t_avg / dtime))) + chunk_size = int(np.round((t_avg / dtime))) Nchunk = int(np.ceil(Ntimes / chunk_size)) chunk_remainder = Ntimes % chunk_size @@ -2087,33 +2088,33 @@ def get_frop_for_noise(times, filter_cent_use, filter_half_wid_use, freqs, raise ValueError(f"nsamples has wrong shape {nsamples.shape} " f"compared to (Ntimes, Nfreqs) = {(Ntimes, Nfreqs)}" " May need to be sliced along an axis.") - if chunk_remainder > 0: # Stack some 0s that get 0 weight so we can do the reshaping below without incident - + if chunk_remainder > 0: # Stack some 0s that get 0 weight so we can do the reshaping below without incident + dmatr_stack_shape = [chunk_size - chunk_remainder, Nmodes] weights_stack_shape = [chunk_size - chunk_remainder, Nfreqs] dmatr = np.vstack((dmatr, np.zeros(dmatr_stack_shape, dtype=complex))) tavg_weights = np.vstack((tavg_weights, np.zeros(weights_stack_shape, dtype=complex))) dlst = np.append(dlst, np.zeros(chunk_size - chunk_remainder, dtype=float)) - + dres = dmatr.reshape(Nchunk, chunk_size, Nmodes) wres = tavg_weights.reshape(Nchunk, chunk_size, Nfreqs) wnorm = wres.sum(axis=1)[:, np.newaxis] # normalize for an average wres = np.where(wnorm > 0, wres / wnorm, 0) - + # Apply the rephase to the weights matrix since it's mathematically equivalent and convenient - if rephase: - wres = utils.lst_rephase(wres.reshape(Nchunk * chunk_size, 1, Nfreqs), + if rephase: + wres = utils.lst_rephase(wres.reshape(Nchunk * chunk_size, 1, Nfreqs).astype(complex), bl_vec, freqs, dlst, lat=lat, inplace=False) wres = wres.reshape(Nchunk, chunk_size, Nfreqs) - + # does "Ttf,Tta->Tfa" much faster than einsum and fancy indexing dchunk = np.zeros([Nchunk, Nfreqs, Nmodes], dtype=complex) for coarse_time_ind in range(Nchunk): - dchunk[coarse_time_ind] = np.tensordot(wres[coarse_time_ind].T, + dchunk[coarse_time_ind] = np.tensordot(wres[coarse_time_ind].T, dres[coarse_time_ind], axes=1) - + # does "Tfa,fat->Ttf" faster than einsum and fancy indexing frop = np.zeros([Nchunk, Ntimes, Nfreqs], dtype=complex) for freq_ind in range(Nfreqs): @@ -2123,10 +2124,11 @@ def get_frop_for_noise(times, filter_cent_use, filter_half_wid_use, freqs, else: # ta,fat->ttf frop = np.tensordot(dmatr, 
                             lsq.transpose(1, 2, 0), axes=1)
-    
+
     return frop
 
-def prep_var_for_frop(data, nsamples, weights, cross_antpairpol, freq_slice, 
+
+def prep_var_for_frop(data, nsamples, weights, cross_antpairpol, freq_slice,
                       auto_ant=None, default_value=0., verbose=False):
     """
     Wrapper around hera_cal.noise.predict_noise_variance_from_autos that preps
@@ -2134,7 +2136,7 @@ def prep_var_for_frop(data, nsamples, weights, cross_antpairpol, freq_slice,
 
     Parameters:
         data: DataContainer
-            Must contain the cross_antpairpol in question as well as some 
+            Must contain the cross_antpairpol in question as well as some
             corresponding autos.
         nsamples: DataContainer
            Contains the nsamples associated with the cross_antpairpol in question.
@@ -2142,12 +2144,12 @@ def prep_var_for_frop(data, nsamples, weights, cross_antpairpol, freq_slice,
         weights: DataContainer
             Contains the weights associated with the cross_antpairpol in question.
         cross_antpairpol: tuple
             Tuple whose first two entries are antennas and last entry is a
-            polarization. 
+            polarization.
         freq_slice: slice
             A slice into the frequency axis of the data that all gets filtered
             identically (a PS analysis band).
         auto_ant: int
-            If not None, should be a single integer specifying a single 
+            If not None, should be a single integer specifying a single
             antenna's autos (this is used because the redundantly averaged data
             have a single auto file for all antennas).
         default_value: (float)
@@ -2170,13 +2172,14 @@ def prep_var_for_frop(data, nsamples, weights, cross_antpairpol, freq_slice,
     all_nonfinite_zero = np.all(weights[cross_antpairpol][:, freq_slice][var_isnotfinite]) == 0
     if not all_nonfinite_zero:
         warnings.warn("Not all nonfinite variance locations are of zero weight!")
-    
+
     if verbose:
         print(f"Replacing nonfinite variances with default value: {default_value}")
     var[var_isnotfinite] = default_value
-    
+
     return var
 
+
 def get_FRF_cov(frop, var):
     """
     Get noise covariance from noise variance and given FRF + coherent average operator.
@@ -2198,9 +2201,10 @@ def get_FRF_cov(frop, var):
     for freq_ind in range(Nfreqs):
         cov[freq_ind] = np.tensordot((frop[:, :, freq_ind] * var[:, freq_ind]),
                                      frop[:, :, freq_ind].T.conj(), axes=1)
-    
+
     return cov
 
+
 def get_corr(cov):
     """
     Get correlation matrices from a sequence of covariance matrices.
@@ -2215,15 +2219,16 @@ def get_corr(cov):
     Ntimes = cov.shape[1]
     diags = cov[:, np.arange(Ntimes), np.arange(Ntimes)]
     corr = cov / np.sqrt(diags[:, None] * diags[:, :, None])
-    
+
     return corr
 
+
 def get_correction_factor_from_cov(cov, tslc=None):
     """
     Get a correction factor for PS noise variance prediction in the absence
-    of propagating the noise covariances all the way to delay space. This is 
-    based on HERA memo #132, where it is shown that one may calculate an 
-    effective number of degrees of freedom based on the variance of a 
+    of propagating the noise covariances all the way to delay space. This is
+    based on HERA memo #132, where it is shown that one may calculate an
+    effective number of degrees of freedom based on the variance of a
     generalized chi-square random variable.
 
     Parameters:
@@ -2238,14 +2243,13 @@ def get_correction_factor_from_cov(cov, tslc=None):
             The correction factor.
     """
     corr = get_corr(cov)
-    
+
     if tslc is None:
         Ncotimes = corr.shape[1]
         Neff = Ncotimes**2 / np.sum(np.abs(corr)**2, axis=(1, 2))
     else:
         Ncotimes = tslc.stop - tslc.start
         Neff = Ncotimes**2 / np.sum(np.abs(corr[:, tslc, tslc])**2, axis=(1, 2))
-    correction_factor = np.mean(Ncotimes / Neff) # Average over frequency since they are independent. 
-    
-    return correction_factor
+    correction_factor = np.mean(Ncotimes / Neff)  # Average over frequency since they are independent.
+    return correction_factor

From 3a57ed882dbd96f178511f4f6b35952531b9e028 Mon Sep 17 00:00:00 2001
From: Josh Dillon
Date: Thu, 24 Oct 2024 14:32:22 -0700
Subject: [PATCH 2/2] slightly safer solution

---
 hera_cal/frf.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/hera_cal/frf.py b/hera_cal/frf.py
index 42154751c..23aa73420 100644
--- a/hera_cal/frf.py
+++ b/hera_cal/frf.py
@@ -2097,14 +2097,14 @@ def get_frop_for_noise(times, filter_cent_use, filter_half_wid_use, freqs,
             dlst = np.append(dlst, np.zeros(chunk_size - chunk_remainder, dtype=float))
 
         dres = dmatr.reshape(Nchunk, chunk_size, Nmodes)
-        wres = tavg_weights.reshape(Nchunk, chunk_size, Nfreqs)
+        wres = tavg_weights.reshape(Nchunk, chunk_size, Nfreqs).astype(complex)
         wnorm = wres.sum(axis=1)[:, np.newaxis]  # normalize for an average
         wres = np.where(wnorm > 0, wres / wnorm, 0)
 
         # Apply the rephase to the weights matrix since it's mathematically equivalent and convenient
         if rephase:
-            wres = utils.lst_rephase(wres.reshape(Nchunk * chunk_size, 1, Nfreqs).astype(complex),
+            wres = utils.lst_rephase(wres.reshape(Nchunk * chunk_size, 1, Nfreqs),
                                      bl_vec, freqs, dlst, lat=lat, inplace=False)
             wres = wres.reshape(Nchunk, chunk_size, Nfreqs)
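
Why the cast is needed: rephasing multiplies the weights by a complex phasor, but
weights built from nsamples are real float64 arrays. The numpy-only sketch below
is illustrative and not part of the patches (the phasor value and array shapes
are made up; it does not reproduce utils.lst_rephase internals), but it shows the
dtype failure mode the first commit works around:

    import numpy as np

    # Stand-in for the rephasing step: multiply weights by a complex phasor.
    phasor = np.exp(2j * np.pi * 0.1)
    wres = np.ones((2, 3, 4))  # float64, like weights built from nsamples

    try:
        wres *= phasor  # in-place: complex output can't be cast back into float64
    except TypeError as err:  # numpy raises a UFuncTypeError subclass
        print("float64 weights reject the phasor:", err)

    wres = wres.astype(complex)  # the fix in these patches
    wres *= phasor               # now the phase survives
    print(wres.dtype)            # complex128

The second commit then moves the .astype(complex) upstream to where wres is first
created, so the array is already complex through the normalization and the
lst_rephase call, rather than being cast only inside the rephasing expression.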
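
The correction-factor math touched at the end of the first patch can also be
sanity-checked in isolation, since get_corr and get_correction_factor_from_cov
use only numpy. The covariance model below is invented purely for illustration
(exponentially decaying time-time correlations standing in for get_FRF_cov
output):

    import numpy as np

    # Synthetic covariance: Nfreqs matrices of shape (Ntimes, Ntimes).
    Nfreqs, Ntimes = 4, 50
    t = np.arange(Ntimes)
    cov = np.ones((Nfreqs, 1, 1)) * np.exp(-np.abs(t[:, None] - t[None, :]) / 5.)

    # Same math as get_corr: normalize each covariance by its diagonal.
    diags = cov[:, np.arange(Ntimes), np.arange(Ntimes)]
    corr = cov / np.sqrt(diags[:, None] * diags[:, :, None])

    # Same math as get_correction_factor_from_cov with tslc=None.
    Neff = Ntimes**2 / np.sum(np.abs(corr)**2, axis=(1, 2))
    correction_factor = np.mean(Ntimes / Neff)  # mean over independent frequencies
    print(correction_factor)  # ~5 for this model; exactly 1 for uncorrelated times

For uncorrelated times corr is the identity, Neff equals Ncotimes, and the factor
reduces to 1; time-time correlations shrink Neff and push the factor above 1.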