Commit

Merge branch 'develop' into MAP_RichardsonLucy
hiyoneda committed Oct 4, 2024
2 parents: 30c4f9a + 99fdff7 (commit a338c9f)
Showing 15 changed files with 562 additions and 44 deletions.
32 changes: 16 additions & 16 deletions cosipy/response/FullDetectorResponse.py
@@ -198,7 +198,7 @@ def _open_rsp(cls, filename, Spectrumfile=None,norm="Linear" ,single_pixel = Fal
try :
norm = str(line[1])
except :
print(f"norm not found in the file ! We assume {norm}")
logger.info(f"norm not found in the file ! We assume {norm}")

if norm =="Linear" :
emin = int(line[2])
@@ -255,12 +255,12 @@ def _open_rsp(cls, filename, Spectrumfile=None,norm="Linear" ,single_pixel = Fal
assert nevents_sim != 0,"number of simulated events is 0 !"


print("normalisation is {0}".format(norm))
logger.info("normalisation is {0}".format(norm))
if sparse == None :
print("Sparse paramater not found in the file : We assume this is a non sparse matrice !")
logger.info("Sparse paramater not found in the file : We assume this is a non sparse matrice !")
sparse = False
else :
print("Sparse matrice ? {0}".format(sparse))
logger.info("Sparse matrice ? {0}".format(sparse))
edges = ()

for axis_edges, axis_type in zip(axes_edges, axes_types):
@@ -298,8 +298,8 @@ def _open_rsp(cls, filename, Spectrumfile=None,norm="Linear" ,single_pixel = Fal

# If fast method fails, use long method, which should work in all cases.
except:
print("Initial attempt failed.")
print("Using long method...")
logger.info("Initial attempt failed.")
logger.info("Using long method...")
nlines = sum(1 for _ in gzip.open(filename,"rt"))

# Preallocate arrays
@@ -308,7 +308,7 @@ def _open_rsp(cls, filename, Spectrumfile=None,norm="Linear" ,single_pixel = Fal

# Calculate the memory usage in Gigabytes
memory_size = ((nlines * data.itemsize)+(axes.ndim*nlines*coords.itemsize))/(1024*1024*1024)
print(f"Estimated RAM you need to read the file : {memory_size} GB")
logger.info(f"Estimated RAM you need to read the file : {memory_size} GB")



@@ -320,7 +320,7 @@ def _open_rsp(cls, filename, Spectrumfile=None,norm="Linear" ,single_pixel = Fal

# Calculate the memory usage in Gigabytes
memory_size = (nlines * data.itemsize)/(1024*1024*1024)
print(f"Estimated RAM you need to read the file : {memory_size} GB")
logger.info(f"Estimated RAM you need to read the file : {memory_size} GB")


# Loop
@@ -381,7 +381,7 @@ def _open_rsp(cls, filename, Spectrumfile=None,norm="Linear" ,single_pixel = Fal
if binLine :
#check we have same number of bin than values read
if len(line)!=nbins :
print("nb of bin content read ({0}) != nb of bins {1}".format(len(line),nbins))
logger.info("nb of bin content read ({0}) != nb of bins {1}".format(len(line),nbins))
sys.exit()

for i in tqdm(range(nbins), desc="Processing", unit="bin"):
@@ -393,7 +393,7 @@ def _open_rsp(cls, filename, Spectrumfile=None,norm="Linear" ,single_pixel = Fal

break

print("response file read ! Now we create the histogram and weight in order to "+
logger.info("response file read ! Now we create the histogram and weight in order to "+
"get the effective area")
# create histpy histogram

@@ -424,11 +424,11 @@ def _open_rsp(cls, filename, Spectrumfile=None,norm="Linear" ,single_pixel = Fal
gauss_int = 0.5 * (1 + erf( (edges[0]-Gauss_mean)/(4*np.sqrt(2)) ) ) + 0.5 * (1 + erf( (edges[1]-Gauss_mean)/(4*np.sqrt(2)) ) )

assert gauss_int == 1, "The gaussian spectrum is not fully contained in this single bin !"
print("Only one bin so we will use the Mono normalisation")
logger.info("Only one bin so we will use the Mono normalisation")
norm ="Mono"

if Spectrumfile is not None and norm=="file":
print("normalisation : spectrum file")
logger.info("normalisation : spectrum file")
# From spectrum file
spec = pd.read_csv(Spectrumfile, sep=" ")
spec = spec.iloc[:-1]
@@ -443,7 +443,7 @@ def _open_rsp(cls, filename, Spectrumfile=None,norm="Linear" ,single_pixel = Fal
nperchannel_norm = hspec[:]

elif norm=="powerlaw":
print("normalisation : powerlaw with index {0} with energy range [{1}-{2}]keV".format(alpha,emin,emax))
logger.info("normalisation : powerlaw with index {0} with energy range [{1}-{2}]keV".format(alpha,emin,emax))
# From powerlaw

e_lo = dr.axes['Ei'].lower_bounds
@@ -466,11 +466,11 @@ def _open_rsp(cls, filename, Spectrumfile=None,norm="Linear" ,single_pixel = Fal
nperchannel_norm = (e_hi**a - e_lo**a) / (emax**a - emin**a)

elif norm =="Linear" :
print("normalisation : linear with energy range [{0}-{1}]".format(emin,emax))
logger.info("normalisation : linear with energy range [{0}-{1}]".format(emin,emax))
nperchannel_norm = ewidth / (emax-emin)

elif norm=="Mono" :
print("normalisation : mono")
logger.info("normalisation : mono")

nperchannel_norm = np.array([1.])

@@ -1163,7 +1163,7 @@ def command_dump():
apar.error(f"Argument '{option}' not valid for 'dump' command")

if args.output is None:
print(result)
logger.info(result)
else:
logger.info(f"Saving result to {Path(args.output).resolve()}")
f = open(args.output, 'a')
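
The edits to this file route what used to be print() output through a module-level logger. A minimal sketch (not part of this commit) of how a user script could surface or silence those INFO messages, assuming the standard-library logging module and a logger created with logging.getLogger(__name__), as in the other files touched by this commit:

    import logging

    # Show INFO-level messages from all cosipy modules on the console.
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s %(name)s %(levelname)s: %(message)s",
    )

    # Or silence a single module without touching the rest
    # (logger name assumes logging.getLogger(__name__) in that module).
    logging.getLogger("cosipy.response.FullDetectorResponse").setLevel(logging.WARNING)
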
6 changes: 3 additions & 3 deletions cosipy/spacecraftfile/SpacecraftFile.py
@@ -956,7 +956,7 @@ def plot_rmf(self, file_name = None, save_name = None, dpi = 300):

# Read the MATRIX extension
matrix_ext = self.rmf['MATRIX']
#print(repr(matrix_hdu.header[:60]))
#logger.info(repr(matrix_hdu.header[:60]))
energy_low = matrix_ext.data["ENERG_LO"] # energy bin lower edges for measured energies
energy_high = matrix_ext.data["ENERG_HI"] # energy bin higher edges for measured energies
data = matrix_ext.data
@@ -988,13 +988,13 @@ def plot_rmf(self, file_name = None, save_name = None, dpi = 300):
energy_all_edges = np.append(energy_low,energy_high[-1])
#bin_edges = np.array([incident_energy_bins,incident_energy_bins]) # doesn't work
bin_edges = np.vstack((energy_all_edges, energy_all_edges))
#print(bin_edges)
#logger.info(bin_edges)

self.probability = []
for i in np.arange(10):
for j in np.arange(10):
self.probability.append(rmf_matrix[i][j])
#print(type(probability))
#logger.info(type(probability))

plt.hist2d(x=x_center_coords,y=y_center_coords,weights=self.probability,bins=bin_edges, norm=LogNorm())
plt.xscale('log')
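
The plot_rmf() code above assembles logarithmic energy bin edges and fills a 2D histogram of redistribution probabilities with a logarithmic colour scale. A minimal standalone sketch of that plotting pattern using synthetic data (not a real MATRIX extension):

    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib.colors import LogNorm

    edges = np.geomspace(100.0, 10000.0, 11)               # 10 energy bins [keV]
    centers = np.sqrt(edges[:-1] * edges[1:])               # geometric bin centres
    x, y = np.meshgrid(centers, centers, indexing="ij")     # (incident, measured)
    prob = np.exp(-((np.log(y) - np.log(x)) ** 2) / 0.05)   # toy redistribution matrix

    plt.hist2d(x=x.ravel(), y=y.ravel(), weights=prob.ravel(),
               bins=np.vstack((edges, edges)), norm=LogNorm())
    plt.xscale("log")
    plt.yscale("log")
    plt.xlabel("Incident energy [keV]")
    plt.ylabel("Measured energy [keV]")
    plt.colorbar(label="Probability")
    plt.show()
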
5 changes: 5 additions & 0 deletions cosipy/test_data/20280301_2s.ori
@@ -0,0 +1,5 @@
Type OrientationsGalactic
OG 1835487300.0 68.44034002307066 44.61117227945379 -21.559659976929343 44.61117227945379
OG 1835487301.0 68.38920658776064 44.642124027021296 -21.610793412239364 44.642124027021296
OG 1835487302.0 68.3380787943012 44.67309722321497 -21.661921205698793 44.67309722321497
EN
Binary file added cosipy/test_data/ei_cds_array.npy
Binary file added cosipy/test_data/ts_map_bkg.h5
Binary file added cosipy/test_data/ts_map_signal.h5
Binary file added cosipy/test_data/ts_map_src_bkg.h5
10 changes: 6 additions & 4 deletions cosipy/ts_map/TSMap.py
@@ -12,6 +12,8 @@

import astropy.io.fits as fits

import logging
logger = logging.getLogger(__name__)

class TSMap:

@@ -175,8 +177,8 @@ def ts_grid_data(self):
for j in range(self.log_like.axes['dec'].nbins):

# progress
print(f"\rra = {i:2d}/{self.log_like.axes['ra'].nbins} ", end = "")
print(f"dec = {j:2d}/{self.log_like.axes['dec'].nbins} ", end = "")
logger.info(f"\rra = {i:2d}/{self.log_like.axes['ra'].nbins} ", end = "")
logger.info(f"dec = {j:2d}/{self.log_like.axes['dec'].nbins} ", end = "")

# changing the position parameters
# converting rad to deg due to ra and dec in 3ML PointSource
@@ -237,10 +239,10 @@ def print_best_fit(self):
else:
self.best_ra = (self.ts.axes['ra'].centers[self.argmax[0]]) * (180/np.pi) # deg
self.best_dec = self.ts.axes['dec'].centers[self.argmax[1]] * (180/np.pi) # deg
print(f"Best fit position: RA = {self.best_ra} deg, Dec = {self.best_dec} deg")
logger.info(f"Best fit position: RA = {self.best_ra} deg, Dec = {self.best_dec} deg")

# convert to significance based on Wilk's theorem
print(f"Expected significance: {stats.norm.isf(stats.chi2.sf(self.ts_max, df = 2)):.1f} sigma")
logger.info(f"Expected significance: {stats.norm.isf(stats.chi2.sf(self.ts_max, df = 2)):.1f} sigma")

def save_ts(self, output_file_name):

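
For reference, the significance that print_best_fit() reports comes from Wilks' theorem: the maximum TS is treated as chi-squared distributed under the null hypothesis (df = 2, as in the code above), converted to a p-value with the survival function, and then mapped to a one-sided Gaussian sigma. A standalone sketch with a hypothetical TS value:

    from scipy import stats

    ts_max = 25.0                           # hypothetical TS value
    p_value = stats.chi2.sf(ts_max, df=2)   # chi2 survival function -> p-value
    sigma = stats.norm.isf(p_value)         # one-sided Gaussian significance
    print(f"TS = {ts_max} -> p = {p_value:.2e} -> {sigma:.1f} sigma")
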
3 changes: 2 additions & 1 deletion cosipy/ts_map/__init__.py
@@ -1,2 +1,3 @@
from .TSMap import TSMap
from .fast_ts_fit import FastTSMap
from .fast_ts_fit import FastTSMap
from .fast_norm_fit import FastNormFit
46 changes: 26 additions & 20 deletions cosipy/ts_map/fast_ts_fit.py
@@ -19,6 +19,9 @@
import gc
import matplotlib.pyplot as plt

import logging
logger = logging.getLogger(__name__)

class FastTSMap():

def __init__(self, data, bkg_model, response_path, orientation = None, cds_frame = "local", scheme = "RING"):
@@ -236,22 +239,22 @@ def get_ei_cds_array(hypothesis_coord, energy_channel, response_path, spectrum,
quiet = True)
#time_coord_convert_end = time.time()
#time_coord_convert_used = time_coord_convert_end - time_coord_convert_start
#print(f"The time used for coordinate conversion is {time_coord_convert_used}s.")
#logger.info(f"The time used for coordinate conversion is {time_coord_convert_used}s.")

#time_dwell_start = time.time()
# get the dwell time map: the map of the time spent on each pixel in the local frame
dwell_time_map = orientation.get_dwell_map(response = response_path)
#time_dwell_end = time.time()
#time_dwell_used = time_dwell_end - time_dwell_start
#print(f"The time used for dwell time map is {time_dwell_used}s.")
#logger.info(f"The time used for dwell time map is {time_dwell_used}s.")

#time_psr_start = time.time()
# convolve the response with the dwell_time_map to get the point source response
with FullDetectorResponse.open(response_path) as response:
psr = response.get_point_source_response(dwell_time_map)
#time_psr_end = time.time()
#time_psr_used = time_psr_end - time_psr_start
#print(f"The time used for psr is {time_psr_used}s.")
#logger.info(f"The time used for psr is {time_psr_used}s.")

elif cds_frame == "galactic":

@@ -272,7 +275,7 @@ def get_ei_cds_array(hypothesis_coord, energy_channel, response_path, spectrum,

#time_cds_end = time.time()
#time_cds_used = time_cds_end - time_cds_start
#print(f"The time used for cds is {time_cds_used}s.")
#logger.info(f"The time used for cds is {time_cds_used}s.")

return ei_cds_array

@@ -304,7 +307,7 @@ def fast_ts_fit(hypothesis_coord,
cds_frame : str
"local" or "galactic", it's the Compton data space (CDS) frame of the data, bkg_model and the response. In other words, they should have the same cds frame .
ts_nside : int
The nside of the ts map.
The nside of the ts map.
ts_scheme : str
The scheme of the Ts map.
@@ -315,11 +318,9 @@ def fast_ts_fit(hypothesis_coord,
"""

start_fast_ts_fit = time.time()

# get the pix number of the ts map
data_array = np.zeros(hp.nside2npix(ts_nside))
ts_temp = HealpixMap(data = data_array, scheme = ts_scheme, coordsys = "galactic")
pix = ts_temp.ang2pix(hypothesis_coord)

# get the indices of the pixels to fit
pix = hp.ang2pix(nside = ts_nside, theta = hypothesis_coord.l.deg, phi = hypothesis_coord.b.deg, lonlat = True)

# get the expected counts in the flattened cds array
start_ei_cds_array = time.time()
@@ -342,15 +343,15 @@ def parallel_ts_fit(self, hypothesis_coords, energy_channel, spectrum, ts_scheme
return [pix, result[0], result[1], result[2], result[3], result[4], time_ei_cds_array, time_fit, time_fast_ts_fit]


def parallel_ts_fit(self, hypothesis_coords, energy_channel, spectrum, ts_scheme = "RING", start_method = "fork", cpu_cores = None):
def parallel_ts_fit(self, hypothesis_coords, energy_channel, spectrum, ts_scheme = "RING", start_method = "fork", cpu_cores = None, ts_nside = None):

"""
Perform parallel computation on all the hypothesis coordinates.
Parameters
----------
hypothesis_coords : list
A list of the hypothesis coordinates
A list of the hypothesis coordinates to fit
energy_channel : list
the energy channel you want to use: [lower_channel, upper_channel]. lower_channel is inclusive while upper_channel is exclusive.
spectrum : astromodels.functions
@@ -361,15 +362,18 @@ def parallel_ts_fit(self, hypothesis_coords, energy_channel, spectrum, ts_scheme
The starting method of the parallel computation (the default is "fork", which implies using the fork method to start parallel computation).
cpu_cores : int, optional
The number of cpu cores you wish to use for the parallel computation (the default is `None`, which implies using all the available number of cores -1 to perform the parallel computation).
ts_nside : int, optional
The nside of the ts map. This must be given if the number of hypothesis_coords isn't equal to the number of pixels of the total ts map, which means that you fit only a portion of the total ts map. (the default is `None`, which means that you fit the full ts map).
Returns
-------
results : numpy.ndarray
The result of the ts fit over all the hypothesis coordinates.
"""

# decide the ts_nside from the list of hypothesis coordinates
ts_nside = hp.npix2nside(len(hypothesis_coords))
# decide the ts_nside from the list of hypothesis coordinates if not given
if ts_nside == None:
ts_nside = hp.npix2nside(len(hypothesis_coords))

# get the flattened data_cds_array
data_cds_array = FastTSMap.get_cds_array(self._data, energy_channel).flatten()
@@ -390,10 +394,10 @@ def parallel_ts_fit(self, hypothesis_coords, energy_channel, spectrum, ts_scheme
# if you don't specify the number of cpu cores to use or the specified number of cpu cores is the same as the total number of cores you have
# it will use the [total_cores - 1] number of cores to run the parallel computation.
cores = total_cores - 1
print(f"You have total {total_cores} CPU cores, using {cores} CPU cores for parallel computation.")
logger.info(f"You have total {total_cores} CPU cores, using {cores} CPU cores for parallel computation.")
else:
cores = cpu_cores
print(f"You have total {total_cores} CPU cores, using {cores} CPU cores for parallel computation.")
logger.info(f"You have total {total_cores} CPU cores, using {cores} CPU cores for parallel computation.")

start = time.time()
multiprocessing.set_start_method(start_method, force = True)
@@ -409,7 +413,7 @@ def parallel_ts_fit(self, hypothesis_coords, energy_channel, spectrum, ts_scheme

elapsed_seconds = end - start
elapsed_minutes = elapsed_seconds/60
print(f"The time used for the parallel TS map computation is {elapsed_minutes} minutes")
logger.info(f"The time used for the parallel TS map computation is {elapsed_minutes} minutes")

results = np.array(results) # turn to a numpy array
results = results[results[:, 0].argsort()] # arrange the order by the pixel numbering
@@ -447,6 +451,7 @@ def _plot_ts(ts_array, skycoord = None, containment = None, save_plot = False, s
lon = skycoord.l.deg
lat = skycoord.b.deg


# get the ts value
m_ts = ts_array

@@ -462,14 +467,14 @@ def _plot_ts(ts_array, skycoord = None, containment = None, save_plot = False, s
hp.mollview(m_ts[:], max = max_ts, min = max_ts-critical, title = f"Containment {percentage}%", coord = "G", hold = True)
elif containment == None:
hp.mollview(m_ts[:], coord = "G", hold = True)



if skycoord != None:
hp.projscatter(lon, lat, marker = "x", linewidths = 0.5, lonlat=True, coord = "G", label = f"True location at l={lon}, b={lat}", color = "fuchsia")
hp.projscatter(0, 0, marker = "o", linewidths = 0.5, lonlat=True, coord = "G", color = "red")
hp.projtext(350, 0, "(l=0, b=0)", lonlat=True, coord = "G", color = "red")

if save_plot == True:

fig.savefig(Path(save_dir)/save_name, dpi = dpi)

return
@@ -498,6 +503,7 @@ def plot_ts(self, ts_array = None, skycoord = None, containment = None, save_plo
if ts_array is not None:

FastTSMap._plot_ts(ts_array = ts_array, skycoord = skycoord, containment = containment,

save_plot = save_plot, save_dir = save_dir, save_name = save_name, dpi = dpi)

else:
@@ -533,6 +539,6 @@ def show_memory_info(hint):

info = p.memory_full_info()
memory = info.uss / 1024. / 1024
print('{} memory used: {} MB'.format(hint, memory))
logger.info('{} memory used: {} MB'.format(hint, memory))
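
A note on the pixel handling changed above: fast_ts_fit() now obtains the pixel index directly from healpy, and parallel_ts_fit() accepts ts_nside because npix2nside() can only infer the resolution when the hypothesis coordinates cover the whole map (npix = 12 * nside**2). A small sketch of those relations, with a hypothetical nside and coordinate:

    import healpy as hp
    from astropy.coordinates import SkyCoord

    ts_nside = 16                              # hypothetical map resolution
    npix = hp.nside2npix(ts_nside)             # 12 * 16**2 = 3072 pixels
    assert hp.npix2nside(npix) == ts_nside     # only valid for a full-sky pixel list

    # Map a galactic hypothesis coordinate to its RING pixel index.
    coord = SkyCoord(l=184.56, b=-5.78, unit="deg", frame="galactic")
    pix = hp.ang2pix(nside=ts_nside, theta=coord.l.deg, phi=coord.b.deg, lonlat=True)
    print(pix)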

