Commit 1628df4

added housekeeping l1 fits files

ehsteve committed Oct 19, 2024
1 parent b7b91a0 commit 1628df4
Showing 4 changed files with 137 additions and 66 deletions.
8 changes: 8 additions & 0 deletions docs/user-guide/level0.rst
@@ -66,3 +66,11 @@ pkttimes the packet time in seconds since EPOCH, also exists in PKT,
pktclock the packet time in clocks since EPOCH, also exists in PKT 32
======== ============================================================================================ ====

Level 0 spectrum files
----------------------
Summary spectra are created for 24 pixels at a regular cadence (normally every 10 s).
Each spectrum has a total of 512 energy bins.

Level 0 housekeeping files
--------------------------
These files contain housekeeping data as described in the housekeeping packet.
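
For a concrete picture of the new housekeeping product, here is a minimal sketch (not part of the commit) of opening one of these files with astropy; the file name is hypothetical, but the HK extension name matches the BinTableHDU written by the pipeline below:

from astropy.io import fits
from astropy.table import Table

# Hypothetical file name; real names come from create_science_filename()
with fits.open("padre_meddea_hk_20241019_v0.1.0.fits") as hdul:
    hk = Table(hdul["HK"].data)  # one row per housekeeping packet
print(hk.colnames)  # includes "seqcount"; the raw CCSDS_* columns are dropped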
131 changes: 92 additions & 39 deletions padre_meddea/calibration/calibration.py
@@ -17,7 +17,7 @@
from padre_meddea import log
from padre_meddea.io import file_tools

from padre_meddea.util.util import create_science_filename
from padre_meddea.util.util import create_science_filename, calc_time
from padre_meddea.io.file_tools import read_raw_file

__all__ = [
@@ -70,44 +70,14 @@ def process_file(filename: Path, overwrite=False) -> list:
this_str,
)

primary_hdr["DATEREF"] = (primary_hdr["DATE-BEG"], "Reference date")
primary_hdr["LEVEL"] = (0, "Data level of fits file")

# add processing information
primary_hdr["PRSTEP1"] = ("PROCESS Raw to L1", "Processing step type")
primary_hdr["PRPROC1"] = (
"padre_meddea.calibration.process",
"Name of procedure performing PRSTEP1",
)
primary_hdr["PRPVER1"] = (
padre_meddea.__version__,
"Version of procedure PRPROC1",
)
primary_hdr["PRLIB1A"] = (
"padre_meddea",
"Software library containing PRPROC1",
)
primary_hdr["PRVER1A"] = (padre_meddea.__version__, "Version of PRLIB1A")
repo = git.Repo(search_parent_directories=True)
primary_hdr["PRHSH1A"] = (
repo.head.object.hexsha,
"GIT commit hash for PRLIB1A",
)
primary_hdr["PRBRA1A"] = (
repo.active_branch.name,
"GIT/SVN repository branch of PRLIB1A",
)
commits = list(repo.iter_commits("main", max_count=1))
primary_hdr["PRVER1B"] = (
Time(commits[0].committed_datetime).fits,
"Date of last commit of PRLIB1B",
)
# primary_hdr["PRLOG1"] add log information, need to do this after the fact
# primary_hdr["PRENV1"] add information about processing env, need to do this after the fact

primary_hdr = add_process_info_to_header(primary_hdr)

# custom keywords
primary_hdr["DATATYPE"] = ("event_list", "Description of the data")
primary_hdr["ORIGAPID"] = (0xA0, "APID(s) of the originating data")
primary_hdr["ORIGAPID"] = (padre_meddea.APID["photon"], "APID(s) of the originating data")
primary_hdr["ORIGFILE"] = (file_path.name, "Originating file(s)")

# add common fits keywords
@@ -142,15 +112,54 @@ def process_file(filename: Path, overwrite=False) -> list:
hdul.writeto(path, overwrite=overwrite)

# Store the output file path in a list
output_files = [path]
output_files.append(path)
if parsed_data["housekeeping"] is not None:
hk_data = parsed_data["housekeeping"]
hk_data.meta["INSTRUME"] = "meddea"
# send data to AWS Timestream for Grafana dashboard
record_timeseries(hk_data, "housekeeping")
hk_table = Table(hk_data)
primary_hdr = fits.Header()
# fill in metadata
primary_hdr["DATE"] = (Time.now().fits, "FITS file creation date in UTC")
primary_hdr["LEVEL"] = (0, "Data level of fits file")
primary_hdr["DATATYPE"] = ("housekeeping", "Description of the data")
primary_hdr["ORIGAPID"] = (padre_meddea.APID["housekeeping"], "APID(s) of the originating data")
primary_hdr["ORIGFILE"] = (file_path.name, "Originating file(s)")
        date_beg = calc_time(hk_data["timestamp"][0])
primary_hdr["DATEREF"] = (date_beg.fits, "Reference date")

if "CHECKSUM" in hk_data.colnames:
hk_data.remove_column("CHECKSUM")
# add processing information
primary_hdr = add_process_info_to_header(primary_hdr)

# add common fits keywords
fits_meta = read_fits_keyword_file(
padre_meddea._data_directory / "fits_keywords_primaryhdu.csv"
)
for row in fits_meta:
primary_hdr[row["keyword"]] = (row["value"], row["comment"])
        hk_table["seqcount"] = hk_table["CCSDS_SEQUENCE_COUNT"]
        colnames_to_remove = [
            "CCSDS_VERSION_NUMBER",
            "CCSDS_PACKET_TYPE",
            "CCSDS_SECONDARY_FLAG",
            "CCSDS_SEQUENCE_FLAG",
            "CCSDS_APID",
            "CCSDS_SEQUENCE_COUNT",
            "CCSDS_PACKET_LENGTH",
            "CHECKSUM",
            "time",
        ]
        for this_col in colnames_to_remove:
            if this_col in hk_table.colnames:
                hk_table.remove_column(this_col)

empty_primary = fits.PrimaryHDU(header=primary_hdr)
hk_hdu = fits.BinTableHDU(hk_table, name="HK")
hk_hdu.add_checksum()
hdul = fits.HDUList([empty_primary, hk_hdu])

path = create_science_filename(
time=date_beg,
level="l1",
descriptor="hk",
test=True,
version="0.1.0",
)
hdul.writeto(path, overwrite=overwrite)
output_files.append(path)
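
A quick way to sanity-check the file just written (a sketch, not part of the commit; path is the value returned by create_science_filename above):

# Re-open the output and verify the layout and checksums
with fits.open(path, checksum=True) as written:
    written.info()  # a PrimaryHDU followed by one BinTableHDU named "HK"
    assert "CHECKSUM" in written["HK"].header  # set by add_checksum() above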



record_timeseries(hk_data, "housekeeping")

# calibrated_file = calibrate_file(data_filename)
# data_plot_files = plot_file(data_filename)
@@ -167,6 +176,50 @@ def raw_to_l0(filename: Path):
data = file_tools.read_raw_file(filename)


def add_process_info_to_header(header: fits.Header) -> fits.Header:
    """Add processing information metadata to a fits header.

    Parameters
    ----------
    header : fits.Header
        The header to update.

    Returns
    -------
    header : fits.Header
        The header with PRSTEP1, PRPROC1, and related processing keywords added.
    """
header["PRSTEP1"] = ("PROCESS Raw to L1", "Processing step type")
header["PRPROC1"] = (
"padre_meddea.calibration.process",
"Name of procedure performing PRSTEP1",
)
header["PRPVER1"] = (
padre_meddea.__version__,
"Version of procedure PRPROC1",
)
header["PRLIB1A"] = (
"padre_meddea",
"Software library containing PRPROC1",
)
header["PRVER1A"] = (padre_meddea.__version__, "Version of PRLIB1A")
repo = git.Repo(search_parent_directories=True)
header["PRHSH1A"] = (
repo.head.object.hexsha,
"GIT commit hash for PRLIB1A",
)
header["PRBRA1A"] = (
repo.active_branch.name,
"GIT/SVN repository branch of PRLIB1A",
)
commits = list(repo.iter_commits("main", max_count=1))
header["PRVER1B"] = (
Time(commits[0].committed_datetime).fits,
"Date of last commit of PRLIB1B",
)
# primary_hdr["PRLOG1"] add log information, need to do this after the fact
# primary_hdr["PRENV1"] add information about processing env, need to do this after the fact
return header
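
A usage sketch for the new helper; note that git.Repo(search_parent_directories=True) means it must run from inside a clone of the repository:

from astropy.io import fits

hdr = fits.Header()
hdr = add_process_info_to_header(hdr)
print(hdr["PRSTEP1"])  # "PROCESS Raw to L1"
print(hdr["PRHSH1A"])  # commit hash of the working clone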


def get_calibration_file(time: Time) -> Path:
"""
Given a time, return the appropriate calibration file.
62 changes: 36 additions & 26 deletions padre_meddea/io/file_tools.py
@@ -82,31 +82,41 @@ def read_fits(filename: Path):
Read a fits file.
"""
hdu = fits.open(filename)

if (hdu[0].header["LEVEL"] == 0) and (hdu[0].header["DATATYPE"] == "event_list"):
num_events = len(hdu["SCI"].data["seqcount"])
ph_times = calc_time(
hdu["sci"].data["pkttimes"],
hdu["sci"].data["pktclock"],
hdu["sci"].data["clocks"],
)
# TODO: protect in case of non-pixel channel
pixel = np.array(
[channel_to_pixel(this_chan) for this_chan in hdu["sci"].data["channel"]],
dtype=np.uint8,
)
event_list = TimeSeries(
time=ph_times,
data={
"atod": hdu["sci"].data["atod"],
"asic": hdu["sci"].data["asic"],
"channel": hdu["sci"].data["channel"],
"pixel": pixel,
"clocks": hdu["sci"].data["clocks"],
"pktnum": hdu["sci"].data["seqcount"],
},
)
event_list.sort()
        event_list = read_fits_l0_event_list(filename)  # TODO: close hdu first; the helper re-opens the file itself
return event_list
else:
        raise ValueError(f"File contents of {filename} not recognized.")


def read_fits_l0_event_list(filename: Path) -> TimeSeries:
    """Read a level 0 event-list fits file and return a time-sorted TimeSeries of photon events."""
hdu = fits.open(filename)
num_events = len(hdu["SCI"].data["seqcount"])
ph_times = calc_time(
hdu["sci"].data["pkttimes"],
hdu["sci"].data["pktclock"],
hdu["sci"].data["clocks"],
)
pixel = np.array(
[channel_to_pixel(this_chan) for this_chan in hdu["sci"].data["channel"]],
dtype=np.uint8,
)
event_list = TimeSeries(
time=ph_times,
data={
"atod": hdu["sci"].data["atod"],
"asic": hdu["sci"].data["asic"],
"channel": hdu["sci"].data["channel"],
"pixel": pixel,
"clocks": hdu["sci"].data["clocks"],
"pktnum": hdu["sci"].data["seqcount"],
},
)
event_list.sort()
return event_list
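
With the parsing factored out of read_fits, the helper can also be called directly; a sketch with a hypothetical file name:

# Hypothetical level 0 event-list file
events = read_fits_l0_event_list("padre_meddea_eventlist_20241019_v0.1.0.fits")
print(len(events), "events")
print(events["pixel"][:5], events["atod"][:5])  # columns assembled above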


def parse_ph_packets(filename: Path):
Expand Down Expand Up @@ -273,9 +283,9 @@ def parse_hk_packets(filename: Path):
return None
packet_definition = packet_definition_hk()
pkt = ccsdspy.FixedLength(packet_definition)
hk_data = pkt.load(packet_bytes)
hk_data = pkt.load(packet_bytes, include_primary_header=True)
hk_timestamps = [
dt.timedelta(seconds=int(this_t)) + EPOCH for this_t in hk_data["TIMESTAMP"]
dt.timedelta(seconds=int(this_t)) + EPOCH for this_t in hk_data["timestamp"]
]
hk_data = TimeSeries(time=hk_timestamps, data=hk_data)
return hk_data
@@ -336,7 +346,7 @@ def packet_definition_hk():
"""Return the packet definiton for the housekeeping packets."""
hk_table = ascii.read(padre_meddea._data_directory / "hk_packet_def.csv")
hk_table.add_index("name")
p = [PacketField(name="TIMESTAMP", data_type="uint", bit_length=32)]
p = [PacketField(name="timestamp", data_type="uint", bit_length=32)]
for this_hk in hk_table["name"]:
p += [PacketField(name=this_hk, data_type="uint", bit_length=16)]
p += [PacketField(name="CHECKSUM", data_type="uint", bit_length=16)]
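
The include_primary_header=True change is what carries the CCSDS_* fields (and hence seqcount) into the housekeeping table; a sketch of parsing a raw stream with this definition (file name hypothetical):

import ccsdspy

pkt = ccsdspy.FixedLength(packet_definition_hk())
hk = pkt.load("hk_packets.bin", include_primary_header=True)
# primary-header fields arrive as CCSDS_* arrays alongside "timestamp"
print(hk["timestamp"][:3], hk["CCSDS_SEQUENCE_COUNT"][:3])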
2 changes: 1 addition & 1 deletion padre_meddea/util/util.py
@@ -106,7 +106,7 @@ def create_science_filename(
return filename + FILENAME_EXTENSION


def calc_time(pkt_time_s, pkt_time_clk, ph_clk=0):
def calc_time(pkt_time_s, pkt_time_clk=0, ph_clk=0):
"""
Convert times to a Time object
"""
Expand Down
