diff --git a/docs/source/conf.py b/docs/source/conf.py index 5d6dfa444..0f515871f 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -124,6 +124,7 @@ (r"py:.*", r".*numpy._typing._array_like._ScalarType_co.*"), (r"py:.*", r".*idex.l1a.TRIGGER_DESCRIPTION.*"), (r"py:.*", r".*idex.l1b.TriggerOrigin.*"), + (r"py:.*", r".*idex.l1b.EventMessage.*"), (r"py:.*", r".*idex.l2a.BaselineNoiseTime.*"), (r"py:.*", r".*PacketProperties"), (r"py:.*", r".*.spice.geometry.SpiceBody.*"), diff --git a/imap_processing/cdf/config/imap_idex_global_cdf_attrs.yaml b/imap_processing/cdf/config/imap_idex_global_cdf_attrs.yaml index a2ce61c7f..664e57adb 100644 --- a/imap_processing/cdf/config/imap_idex_global_cdf_attrs.yaml +++ b/imap_processing/cdf/config/imap_idex_global_cdf_attrs.yaml @@ -16,10 +16,10 @@ imap_idex_l1a_sci: Logical_source_description: IMAP Mission IDEX Instrument Level-1A Weekly Data. imap_idex_l1a_msg: - <<: *instrument_base - Data_type: L1A_MSG>Level-1A Event Message Data - Logical_source: imap_idex_l1a_msg - Logical_source_description: IMAP Mission IDEX Instrument Level-1A Event Message Data. + <<: *instrument_base + Data_type: L1A_MSG>Level-1A Event Message Data + Logical_source: imap_idex_l1a_msg + Logical_source_description: IMAP Mission IDEX Instrument Level-1A Event Message Data. imap_idex_l1a_catlst: <<: *instrument_base @@ -34,10 +34,10 @@ imap_idex_l1b_sci: Logical_source_description: IMAP Mission IDEX Instrument Level-1B Weekly Data. imap_idex_l1b_msg: - <<: *instrument_base - Data_type: L1B_MSG>Level-1B Event Message Data - Logical_source: imap_idex_l1b_msg - Logical_source_description: IMAP Mission IDEX Instrument Level-1B Event Message Data. + <<: *instrument_base + Data_type: L1B_MSG>Level-1B Event Message Data + Logical_source: imap_idex_l1b_msg + Logical_source_description: IMAP Mission IDEX Instrument Level-1B Event Message Data. 
imap_idex_l1b_catlst: <<: *instrument_base diff --git a/imap_processing/cdf/config/imap_idex_l1a_variable_attrs.yaml b/imap_processing/cdf/config/imap_idex_l1a_variable_attrs.yaml index 4cf422b55..be5104ddc 100644 --- a/imap_processing/cdf/config/imap_idex_l1a_variable_attrs.yaml +++ b/imap_processing/cdf/config/imap_idex_l1a_variable_attrs.yaml @@ -199,6 +199,7 @@ shcoarse: FIELDNAM: Secondary header coarse time LABLAXIS: Packet Generation Time (Coarse) UNITS: seconds + FILLVAL: 4294967295 shfine: <<: *trigger_base @@ -207,6 +208,7 @@ shfine: VALIDMAX: *max_uint16 LABLAXIS: Packet Generation Time (Fine) UNITS: seconds + FILLVAL: 65535 messages: <<: *string_base diff --git a/imap_processing/cdf/config/imap_idex_l1b_variable_attrs.yaml b/imap_processing/cdf/config/imap_idex_l1b_variable_attrs.yaml index 0db4a6b75..e65b69da0 100644 --- a/imap_processing/cdf/config/imap_idex_l1b_variable_attrs.yaml +++ b/imap_processing/cdf/config/imap_idex_l1b_variable_attrs.yaml @@ -89,6 +89,28 @@ trigger_origin: FIELDNAM: Trigger Origin CATDESC: Trigger Origin of the event. +pulser_on: + <<: *trigger_base + FIELDNAM: Pulser On + CATDESC: Pulser state flag derived from message events (0=off, 1=on). + LABLAXIS: Pulser On + FORMAT: I1 + FILLVAL: 255 + VAR_TYPE: support_data + VALIDMIN: 0 + VALIDMAX: 1 + +science_on: + <<: *trigger_base + FIELDNAM: Science On + CATDESC: Science acquisition state flag derived from message events (0=off, 1=on). 
+ LABLAXIS: Science On + FORMAT: I1 + FILLVAL: 255 + VAR_TYPE: support_data + VALIDMIN: 0 + VALIDMAX: 1 + tof_high: <<: *l1b_tof_base CATDESC: Time of flight waveform on the high-gain channel diff --git a/imap_processing/cli.py b/imap_processing/cli.py index 67b4a7372..68da35fdd 100644 --- a/imap_processing/cli.py +++ b/imap_processing/cli.py @@ -1040,10 +1040,11 @@ def do_processing( science_files = dependencies.get_file_paths(source="idex") datasets = PacketParser(science_files[0]).data elif self.data_level == "l1b": - if len(dependency_list) != 3: + n_expected_deps = 3 if self.descriptor == "sci-1week" else 1 + if len(dependency_list) != n_expected_deps: raise ValueError( - f"Unexpected dependencies found for IDEX L1B:" - f"{dependency_list}. Expected only three dependencies." + f"Unexpected dependencies found for IDEX L1B {self.descriptor}:" + f"{dependency_list}. Expected only {n_expected_deps} dependencies." ) # get CDF file science_files = dependencies.get_file_paths(source="idex") @@ -1055,7 +1056,7 @@ def do_processing( raise ValueError("No science files found for IDEX L1B processing.") latest_file = max(science_datasets, key=lambda ds: ds["epoch"].data[0]) # process data - datasets = [idex_l1b(latest_file)] + datasets = [idex_l1b(latest_file, self.descriptor)] elif self.data_level == "l2a": if len(dependency_list) != 3: raise ValueError( diff --git a/imap_processing/idex/evt_msg_decode_utils.py b/imap_processing/idex/evt_msg_decode_utils.py index c2e994840..ec3dff882 100644 --- a/imap_processing/idex/evt_msg_decode_utils.py +++ b/imap_processing/idex/evt_msg_decode_utils.py @@ -14,7 +14,7 @@ def render_event_template( Produce an event message string by replacing placeholders with parameter values. 
Example template: - "Event {p0} occurred with value {p1+2:dictName}" + "Event {p0} occurred with value {p1+2|dictName}" This would replace {p0} with the hex value of params[0], and replace {p1+2:dictName} with the combined hex value of params[1] and params[2] (treated as big-endian bytes) diff --git a/imap_processing/idex/idex_constants.py b/imap_processing/idex/idex_constants.py index 0f6d0e8f1..176971aea 100644 --- a/imap_processing/idex/idex_constants.py +++ b/imap_processing/idex/idex_constants.py @@ -88,11 +88,3 @@ class ConversionFactors(float, Enum): # Define the pointing reference frame for IDEX IDEX_EVENT_REFERENCE_FRAME = SpiceFrame.ECLIPJ2000 - - -class IDEXEvtAcquireCodes(IntEnum): - """Create ENUM for event message ints that signify science acquire events.""" - - ACQSETUP = 2 - ACQ = 3 - CHILL = 5 diff --git a/imap_processing/idex/idex_l1a.py b/imap_processing/idex/idex_l1a.py index 4d6b06790..3cc053a67 100644 --- a/imap_processing/idex/idex_l1a.py +++ b/imap_processing/idex/idex_l1a.py @@ -135,12 +135,14 @@ def _create_evt_msg_data(self, data: xr.Dataset) -> xr.Dataset: data_vars={ "epoch": xr.DataArray(epoch, name="epoch", dims=["epoch"]), "shfine": xr.DataArray( - data["shfine"].data, dims=["epoch"], attrs=data["shfine"].attrs + data["shfine"].data, + dims=["epoch"], + attrs=self.idex_attrs.get_variable_attributes("shfine"), ), "shcoarse": xr.DataArray( data["shcoarse"].data, dims=["epoch"], - attrs=data["shcoarse"].attrs, + attrs=self.idex_attrs.get_variable_attributes("shcoarse"), ), }, attrs=self.idex_attrs.get_global_attributes("imap_idex_l1a_msg"), diff --git a/imap_processing/idex/idex_l1b.py b/imap_processing/idex/idex_l1b.py index c271af1f8..b51a4a166 100644 --- a/imap_processing/idex/idex_l1b.py +++ b/imap_processing/idex/idex_l1b.py @@ -45,6 +45,19 @@ logger = logging.getLogger(__name__) +class EventMessage(Enum): + """Enum class for event messages.""" + + PULSER_ON = "SEQ success (len=0x0580, opCodeLCDictionary(enstim))" + PULSER_OFF = 
"SEQ success (len=0x0580, opCodeLCDictionary(susprel))" + SCIENCE_ON = ( + "SCI state change: sciState16Dictionary(ACQSETUP) ==> sciState16Dictionary(ACQ)" + ) + SCIENCE_OFF = ( + "SCI state change: sciState16Dictionary(ACQ) ==> sciState16Dictionary(CHILL)" + ) + + class TriggerOrigin(IntEnum): """Enum class for event trigger origins.""" @@ -104,9 +117,96 @@ def get_mode_label(mode: int, channel: str) -> str: return f"{channel.upper()}{TriggerMode(mode).name}" -def idex_l1b(l1a_dataset: xr.Dataset) -> xr.Dataset: +def idex_l1b(l1a_dataset: xr.Dataset, descriptor: str) -> xr.Dataset: + """ + Process IDEX l1a data to create l1b data products based on the descriptor. + + Parameters + ---------- + l1a_dataset : xarray.Dataset + IDEX L1a dataset to process. + descriptor : str + Descriptor to determine the type of l1b processing to perform. E.g. "sci-1week" + or "msg". + + Returns + ------- + l1b_dataset : xarray.Dataset + The``xarray`` dataset containing the processed data and supporting metadata. + """ + if descriptor.startswith("sci"): + return idex_l1b_science(l1a_dataset) + elif descriptor.startswith("msg"): + return idex_l1b_msg(l1a_dataset) + else: + raise ValueError(f"Unsupported descriptor: {descriptor}") + + +def idex_l1b_msg(l1a_dataset: xr.Dataset) -> xr.Dataset: + """ + Will process IDEX l1a msg data. + + Parameters + ---------- + l1a_dataset : xarray.Dataset + IDEX L1a dataset to process. + + Returns + ------- + l1b_dataset : xarray.Dataset + The``xarray`` dataset containing the msg housekeeping data and + supporting metadata. + """ + logger.info( + f"Running IDEX L1B MSG processing on dataset: " + f"{l1a_dataset.attrs['Logical_source']}" + ) + # create the attribute manager for this data level + idex_attrs = get_idex_attrs("l1b") + # set up a dataset with only epoch. 
+ l1b_dataset = setup_dataset(l1a_dataset, [], idex_attrs, data_vars=None) + l1b_dataset.attrs = idex_attrs.get_global_attributes("imap_idex_l1b_msg") + # Compute science_on and pulser_on variables based on the event message. The + # "science_on" variable indicates when the science data collection is turned on or + # off and the "pulser_on" variable indicates when the pulser is turned on or off. + # The following logic is applied to determine the pulser_on status. + # enstim → set pulser_on = 1 + # susprel AND the previous message was enstim → set pulser_on = 0 + # susprel but previous message was NOT enstim → pulser_on stays whatever it was + l1a_messages = l1a_dataset.messages.values + # Set science_on to 1 when science is on and 0 when it is off. 255 otherwise. + science_on = np.where(l1a_messages == EventMessage.SCIENCE_ON.value, 1, 255) + science_on[l1a_messages == EventMessage.SCIENCE_OFF.value] = 0 + # Find indices where there are consecutive PULSER_ON followed by PULSER_OFF + # messages. These are the only cases where we should set pulser_on to 1 and 0. + # Compare the messages by shifting the pulser off messages back by one and looking + # for matching overlaps. 
+ consecutive_pulser_on_off = np.where( + (l1a_messages[:-1] == EventMessage.PULSER_ON.value) + & (l1a_messages[1:] == EventMessage.PULSER_OFF.value) + )[0] + pulser_on = np.full(len(l1a_messages), 255) # initialize with 255 (unknown) + pulser_on[consecutive_pulser_on_off] = 1 + pulser_on[consecutive_pulser_on_off + 1] = 0 + l1b_dataset["pulser_on"] = xr.DataArray( + data=pulser_on, + dims="epoch", + name="pulser_on", + attrs=idex_attrs.get_variable_attributes("pulser_on"), + ) + l1b_dataset["science_on"] = xr.DataArray( + data=science_on, + dims="epoch", + name="science_on", + attrs=idex_attrs.get_variable_attributes("science_on"), + ) + logger.info("IDEX L1B MSG data processing completed.") + return l1b_dataset + + +def idex_l1b_science(l1a_dataset: xr.Dataset) -> xr.Dataset: """ - Will process IDEX l1a data to create l1b data products. + Will process IDEX l1a science data. Parameters ---------- diff --git a/imap_processing/idex/idex_l2b.py b/imap_processing/idex/idex_l2b.py index 530bbd5bc..0fb193922 100644 --- a/imap_processing/idex/idex_l2b.py +++ b/imap_processing/idex/idex_l2b.py @@ -12,12 +12,13 @@ l0_file = "imap_processing/tests/idex/imap_idex_l0_raw_20231218_v001.pkts" l0_file_hk = "imap_processing/tests/idex/imap_idex_l0_raw_20250108_v001.pkts" - l1a_data = PacketParser(l0_file).data[0] - evt_data = PacketParser(l0_file_hk).data[0] - l1a_data, l1a_evt_data, l1b_evt_data = PacketParser(l0_file) - l1b_data = idex_l1b(l1a_data) + l1a_data, _ = PacketParser(l0_file) + _, l1a_msg_data = PacketParser(l0_file_hk) + msg_data_l1b = idex_l1b(l1a_msg_data, "msg") + l1b_data = idex_l1b(l1a_data, "sci-1week") + l2a_data = idex_l2a(l1b_data) - l2b_and_l2c_datasets = idex_l2b(l2a_data, [evt_data]) + l2b_and_l2c_datasets = idex_l2b(l2a_data, [msg_data_l1b]) write_cdf(l2b_and_l2c_datasets[0]) write_cdf(l2b_and_l2c_datasets[1]) """ @@ -38,7 +39,6 @@ IDEX_EVENT_REFERENCE_FRAME, IDEX_SPACING_DEG, SECONDS_IN_DAY, - IDEXEvtAcquireCodes, ) from imap_processing.idex.idex_utils 
import get_idex_attrs from imap_processing.spice.time import epoch_to_doy, et_to_datetime64, ttj2000ns_to_et @@ -84,7 +84,7 @@ def idex_l2b( - l2a_datasets: list[xr.Dataset], evt_datasets: list[xr.Dataset] + l2a_datasets: list[xr.Dataset], msg_data_l1b: list[xr.Dataset] ) -> list[xr.Dataset]: """ Will process IDEX l2a data to create l2b and l2c data products. @@ -96,8 +96,8 @@ def idex_l2b( ---------- l2a_datasets : list[xarray.Dataset] IDEX L2a datasets to process. - evt_datasets : list[xarray.Dataset] - List of IDEX housekeeping event message datasets. + msg_data_l1b : list[xarray.Dataset] + List of IDEX L1B event message datasets. Returns ------- @@ -113,8 +113,9 @@ def idex_l2b( # create the attribute manager for this data level idex_l2b_attrs = get_idex_attrs("l2b") idex_l2c_attrs = get_idex_attrs("l2c") - evt_dataset = xr.concat(evt_datasets, dim="epoch") - + msg_ds = ( + xr.concat(msg_data_l1b, dim="epoch").sortby("epoch").drop_duplicates("epoch") + ) # Concat all the l2a datasets together l2a_dataset = xr.concat(l2a_datasets, dim="epoch") epoch_doy = epoch_to_doy(l2a_dataset["epoch"].data) @@ -130,10 +131,14 @@ def idex_l2b( counts_by_mass_map, daily_epoch, ) = compute_counts_by_charge_and_mass(l2a_dataset, epoch_doy_unique) - # Get science acquisition start and stop times - _, evt_time, evt_values = get_science_acquisition_timestamps(evt_dataset) + # Filter the message dataset to only include science acquisition on/off events. 
+ # (ignore fill vals) + science_on_msg_ds = msg_ds.isel(epoch=np.isin(msg_ds.science_on, [0, 1])) + msg_time = science_on_msg_ds["epoch"].data + msg_values = science_on_msg_ds["science_on"].data + # Get science acquisition percentage for each day - daily_on_percentage = get_science_acquisition_on_percentage(evt_time, evt_values) + daily_on_percentage = get_science_acquisition_on_percentage(msg_time, msg_values) ( rate_by_charge, rate_by_mass, @@ -164,7 +169,7 @@ def idex_l2b( common_vars = { "on_off_times": xr.DataArray( name="on_off_times", - data=evt_time, + data=msg_time, dims="on_off_times", attrs=idex_l2b_attrs.get_variable_attributes( "on_off_times", check_schema=False @@ -172,7 +177,7 @@ def idex_l2b( ), "on_off_events": xr.DataArray( name="on_off_events", - data=np.asarray(evt_values, dtype=np.uint8), + data=np.asarray(msg_values, dtype=np.uint8), dims="on_off_times", attrs=idex_l2b_attrs.get_variable_attributes( "on_off_events", check_schema=False @@ -589,79 +594,17 @@ def bin_spin_phases(spin_phases: xr.DataArray) -> np.ndarray: return np.asarray(bin_indices) -def get_science_acquisition_timestamps( - evt_dataset: xr.Dataset, -) -> tuple[np.ndarray, np.ndarray, np.ndarray]: - """ - Get the science acquisition start and stop times and messages from the event data. - - Parameters - ---------- - evt_dataset : xarray.Dataset - Contains IDEX event message data. - - Returns - ------- - event_logs : np.ndarray - Array containing science acquisition start and stop events messages. - event_timestamps : np.ndarray - Array containing science acquisition start and stop timestamps. - event_values : np.ndarray - Array containing values indicating if the event is a start (1) or - stop (0). - """ - # Sort the event dataset by the epoch time. 
Drop duplicates - evt_dataset = evt_dataset.sortby("epoch").drop_duplicates("epoch") - # First find indices of the state change events - sc_indices = np.where(evt_dataset["elid_evtpkt"].data == "SCI_STE")[0] - event_logs = [] - event_timestamps = [] - event_values = [] - # Get the values of the state change events - val1 = ( - evt_dataset["el1par_evtpkt"].data[sc_indices] << 8 - | evt_dataset["el2par_evtpkt"].data[sc_indices] - ) - val2 = ( - evt_dataset["el3par_evtpkt"].data[sc_indices] << 8 - | evt_dataset["el4par_evtpkt"].data[sc_indices] - ) - epochs = evt_dataset["epoch"][sc_indices].data - # Now the state change values and check if it is either a science - # acquisition start or science acquisition stop event. - for v1, v2, epoch in zip(val1, val2, epochs, strict=False): - # An "acquire" start will have val1=ACQSETUP and val2=ACQ - # An "acquire" stop will have val1=ACQ and val2=CHILL - if (v1, v2) == (IDEXEvtAcquireCodes.ACQSETUP, IDEXEvtAcquireCodes.ACQ): - event_logs.append("SCI state change: ACQSETUP to ACQ") - event_timestamps.append(epoch) - event_values.append(1) - elif (v1, v2) == (IDEXEvtAcquireCodes.ACQ, IDEXEvtAcquireCodes.CHILL): - event_logs.append("SCI state change: ACQ to CHILL") - event_timestamps.append(epoch) - event_values.append(0) - - logger.info( - f"Found science acquisition events: {event_logs} at times: {event_timestamps}" - ) - return ( - np.asarray(event_logs), - np.asarray(event_timestamps), - np.asarray(event_values), - ) - - def get_science_acquisition_on_percentage( - evt_time: NDArray, evt_values: NDArray + msg_time: NDArray, msg_values: NDArray ) -> dict: """ Calculate the percentage of time science acquisition was occurring for each day. Parameters ---------- - evt_time : np.ndarray + msg_time : np.ndarray Array of timestamps for science acquisition start and stop events. - evt_values : np.ndarray + msg_values : np.ndarray Array of values indicating if the event is a start (1) or stop (0). 
Returns @@ -670,7 +613,7 @@ def get_science_acquisition_on_percentage( Percentages of time the instrument was in science acquisition mode for each day of year. """ - if len(evt_time) == 0: + if len(msg_time) == 0: logger.warning( "No science acquisition events found in event dataset. Returning empty " "uptime percentages. All rate variables will be set to -1." @@ -680,17 +623,17 @@ def get_science_acquisition_on_percentage( daily_totals: collections.defaultdict = defaultdict(timedelta) daily_on: collections.defaultdict = defaultdict(timedelta) # Convert epoch event times to datetime - dates = et_to_datetime64(ttj2000ns_to_et(evt_time)).astype(datetime) + dates = et_to_datetime64(ttj2000ns_to_et(msg_time)).astype(datetime) # Simulate an event at the start of the first day. start_of_first_day = dates[0].replace(hour=0, minute=0, second=0, microsecond=0) # Assume that the state at the start of the day is the opposite of what the first # state is. - state_at_start = 0 if evt_values[0] == 1 else 1 + state_at_start = 0 if msg_values[0] == 1 else 1 dates = np.insert(dates, 0, start_of_first_day) - evt_values = np.insert(evt_values, 0, state_at_start) + msg_values = np.insert(msg_values, 0, state_at_start) for i in range(len(dates)): start = dates[i] - state = evt_values[i] + state = msg_values[i] if i == len(dates) - 1: # If this is the last event, set the "end" value the end of the day. 
end = (start + timedelta(days=1)).replace( diff --git a/imap_processing/tests/idex/conftest.py b/imap_processing/tests/idex/conftest.py index 96f5c98b8..fbeb38e49 100644 --- a/imap_processing/tests/idex/conftest.py +++ b/imap_processing/tests/idex/conftest.py @@ -20,7 +20,7 @@ L1B_EXAMPLE_FILE = TEST_DATA_PATH / "imap_idex_l1b_sci_20231218_v001.h5" L2A_CDF = TEST_DATA_PATH / "imap_idex_l2a_sci-1week_20251017_v001.cdf" -L1B_MSG_CDF = TEST_DATA_PATH / "imap_idex_l1b_evt_20250108_v001.cdf" +L1B_MSG_CDF = TEST_DATA_PATH / "imap_idex_l1b_msg_20250108_v001.cdf" pytestmark = pytest.mark.external_test_data @@ -51,7 +51,7 @@ def decom_test_data_catlst() -> xr.Dataset: @pytest.fixture def decom_test_data_msg() -> xr.Dataset: - """``xarray`` dataset containing the raw and derived event log data. + """``xarray`` dataset containing the raw event message data. Returns ------- @@ -61,6 +61,18 @@ def decom_test_data_msg() -> xr.Dataset: return PacketParser(TEST_L0_FILE_MSG).data[0] +@pytest.fixture +def test_l1b_msg(decom_test_data_msg) -> xr.Dataset: + """``xarray`` dataset containing the l1b msg data. + + Returns + ------- + dataset : xarray.Dataset + ``xarray`` dataset containing the event log data. 
+ """ + return idex_l1b(decom_test_data_msg, "msg") + + @pytest.fixture def l1a_example_data(_download_test_data): """ @@ -112,7 +124,7 @@ def l1b_dataset(mock_get_spice_data, decom_test_data_sci: xr.Dataset) -> xr.Data """ mock_get_spice_data.side_effect = get_spice_data_side_effect_func - dataset = idex_l1b(decom_test_data_sci) + dataset = idex_l1b(decom_test_data_sci, "sci-1week") return dataset diff --git a/imap_processing/tests/idex/test_idex_l1b.py b/imap_processing/tests/idex/test_idex_l1b.py index 2b95b28dc..21b482218 100644 --- a/imap_processing/tests/idex/test_idex_l1b.py +++ b/imap_processing/tests/idex/test_idex_l1b.py @@ -12,10 +12,12 @@ from imap_processing.cdf.utils import write_cdf from imap_processing.idex.idex_l1b import ( TRIGGER_LABELS, + EventMessage, TriggerOrigin, get_spice_data, get_trigger_mode_and_level, get_trigger_origin, + idex_l1b, unpack_instrument_settings, ) from imap_processing.idex.idex_utils import get_idex_attrs @@ -358,3 +360,46 @@ def test_validate_l1b_idex_data_variables( decimal=4, err_msg=warning, ) + + +def test_l1b_msg_processing(decom_test_data_msg: xr.Dataset): + """Verify that the MSG data is being processed correctly in the l1b processing. + + Parameters + ---------- + decom_test_data_msg : xr.Dataset + A dataset containing the MSG data produced by the l1a processing. 
+ """ + msg_ds = decom_test_data_msg.copy() + # Set 2 consecutive events to have pulser on and pulser off + msg_ds.messages[2] = EventMessage.PULSER_ON.value + msg_ds.messages[3] = EventMessage.PULSER_OFF.value + # Set 2 to have a non-consecutive pulser on and pulser off to check that + # non-consecutive events are treated as non-valid pulser on and off events + msg_ds.messages[20] = EventMessage.PULSER_ON.value + msg_ds.messages[22] = EventMessage.PULSER_OFF.value + # Process the MSG data with the l1b function + test_l1b_msg = idex_l1b(msg_ds, "msg") + expected_vars = [ + "epoch", + "pulser_on", + "science_on", + ] + for var in expected_vars: + assert var in test_l1b_msg, ( + f"The variable '{var}' is missing from the MSG dataset." + ) + + # Check that the pulser_on variable is correct + expected_pulser_on = np.ones_like(test_l1b_msg["pulser_on"]) * 255 + # The pulser_on variable should be 1 for the 2nd and 0 for the 3rd event, and + # 255 for all other events + expected_pulser_on[2] = 1 + expected_pulser_on[3] = 0 + np.testing.assert_array_equal(test_l1b_msg["pulser_on"].data, expected_pulser_on) + # Check that the science_on variable is correct + expected_science_on = np.ones_like(test_l1b_msg["pulser_on"]) * 255 + # The science_on variable should be 1 for the 10th event and 0 for the 11th event + expected_science_on[10] = 1 + expected_science_on[11] = 0 + np.testing.assert_array_equal(test_l1b_msg["science_on"].data, expected_science_on) diff --git a/imap_processing/tests/idex/test_idex_l2a.py b/imap_processing/tests/idex/test_idex_l2a.py index 5b4517de8..9dd96feeb 100644 --- a/imap_processing/tests/idex/test_idex_l2a.py +++ b/imap_processing/tests/idex/test_idex_l2a.py @@ -49,7 +49,7 @@ def l2a_dataset( "imap_processing.idex.idex_l1b.get_spice_data", return_value={"spin_phase": spin_phase_angles}, ): - dataset = idex_l2a(idex_l1b(decom_test_data_sci), ancillary_files) + dataset = idex_l2a(idex_l1b(decom_test_data_sci, "sci-1week"), ancillary_files) return 
dataset diff --git a/imap_processing/tests/idex/test_idex_l2b.py b/imap_processing/tests/idex/test_idex_l2b.py index 3a9605aa0..721c27452 100644 --- a/imap_processing/tests/idex/test_idex_l2b.py +++ b/imap_processing/tests/idex/test_idex_l2b.py @@ -1,13 +1,11 @@ """Tests the L2b processing for IDEX data""" -from unittest import mock - import numpy as np import pytest import xarray as xr from numpy.testing import assert_array_equal -from imap_processing.cdf.utils import load_cdf, write_cdf +from imap_processing.cdf.utils import write_cdf from imap_processing.idex.idex_constants import ( FG_TO_KG, IDEX_SPACING_DEG, @@ -25,11 +23,10 @@ get_science_acquisition_on_percentage, idex_l2b, ) -from imap_processing.tests.idex.conftest import L1B_MSG_CDF @pytest.fixture -def l2b_and_l2c_datasets(l2a_dataset: xr.Dataset) -> list[xr.Dataset]: +def l2b_and_l2c_datasets(l2a_dataset: xr.Dataset, test_l1b_msg) -> list[xr.Dataset]: """Return a ``xarray`` dataset containing test data. Returns @@ -37,9 +34,8 @@ def l2b_and_l2c_datasets(l2a_dataset: xr.Dataset) -> list[xr.Dataset]: datasets : list[xr.Dataset] A list of ``xarray`` datasets containing the test data for L2B and L2C. 
""" - l1b_msg_dataset = load_cdf(L1B_MSG_CDF) l1b_msg_dataset2 = ( - l1b_msg_dataset.copy() + test_l1b_msg.copy() ) # Add a second dataset with different epoch values for testing l2a_dataset2 = ( l2a_dataset.copy() @@ -47,7 +43,7 @@ def l2b_and_l2c_datasets(l2a_dataset: xr.Dataset) -> list[xr.Dataset]: l1b_msg_dataset2["epoch"] = l1b_msg_dataset2["epoch"] + NANOSECONDS_IN_DAY l2a_dataset2["epoch"] = l2a_dataset2["epoch"] + NANOSECONDS_IN_DAY datasets = idex_l2b( - [l2a_dataset, l2a_dataset2], [l1b_msg_dataset, l1b_msg_dataset2] + [l2a_dataset, l2a_dataset2], [test_l1b_msg.copy(), l1b_msg_dataset2] ) return datasets @@ -176,62 +172,35 @@ def test_bin_spin_phases_warning(caplog): ) in caplog.text -# TODO uncomment tests below when the event message l1b products are ready -# def test_science_acquisition_times(decom_test_data_msg: xr.Dataset): -# """Tests that the expected science acquisition times and messages are present. -# -# Parameters -# ---------- -# decom_test_data_msg : xr.Dataset -# A ``xarray`` dataset containing the test data -# """ -# logs, times, vals = get_science_acquisition_timestamps(decom_test_data_msg) -# # For this example event message dataset we expect science acquisition events. -# assert len(logs) == 2 -# assert len(times) == 2 -# assert len(vals) == 2 -# # The first event message is the start of the science acquisition. -# assert logs[0] == "SCI state change: ACQSETUP to ACQ" -# # The second event message is the end of the science acquisition. 
-# assert logs[1] == "SCI state change: ACQ to CHILL" -# -# # assert the values are correct -# np.testing.assert_array_equal(vals, [1, 0]) -# -# -# def test_get_science_acquisition_on_percentage(decom_test_data_msg: xr.Dataset): -# """Test the function that calculates the percentage of uptime.""" -# _, msg_time, msg_event = get_science_acquisition_timestamps(decom_test_data_msg) -# on_percentages = get_science_acquisition_on_percentage(msg_time, msg_event) -# # We expect 1 DOY and ~87% uptime for the science acquisition. -# assert len(on_percentages) == 1 -# # The DOY should be 8 for this test dataset. -# assert on_percentages[8] < 1 -# -# msg_ds = decom_test_data_msg[1].copy() -# msg_ds_shifted = msg_ds.copy() -# msg_ds_shifted["epoch"] = msg_ds["epoch"] + NANOSECONDS_IN_DAY -# combined_ds = xr.concat([msg_ds, msg_ds_shifted], dim="epoch") -# # expect a second DOY. -# _, msg_time, msg_event = get_science_acquisition_timestamps(combined_ds) -# on_percentages = get_science_acquisition_on_percentage(msg_time, msg_event) -# # We expect 2 DOYs -# assert len(on_percentages) == 2 -# # The uptime should be less than 1% for both -# assert on_percentages[8] < 1 -# assert on_percentages[9] < 1 # The uptime should be less than 1% -# +def test_get_science_acquisition_on_percentage(test_l1b_msg: xr.Dataset): + """Test the function that calculates the percentage of uptime.""" + test_l1b_msg = test_l1b_msg.isel(epoch=np.isin(test_l1b_msg.science_on, [0, 1])) + msg_time = test_l1b_msg.epoch.data + msg_event = test_l1b_msg.science_on.data + on_percentages = get_science_acquisition_on_percentage(msg_time, msg_event) + # We expect 1 DOY with less than 1% uptime for the science acquisition. + assert len(on_percentages) == 1 + # The DOY should be 8 for this test dataset. 
+ assert on_percentages[8] < 1 + + msg_ds = test_l1b_msg.copy() + msg_ds_shifted = msg_ds.copy() + msg_ds_shifted["epoch"] = msg_ds["epoch"] + NANOSECONDS_IN_DAY + combined_ds = xr.concat([msg_ds, msg_ds_shifted], dim="epoch") + # expect a second DOY. + msg_time = combined_ds.epoch.data + msg_event = combined_ds.science_on.data + on_percentages = get_science_acquisition_on_percentage(msg_time, msg_event) + # We expect 2 DOYs + assert len(on_percentages) == 2 + # The uptime should be less than 1% for both + assert on_percentages[8] < 1 + assert on_percentages[9] < 1 # The uptime should be less than 1% def test_get_science_acquisition_on_percentage_no_acquisition(caplog): """Test the function returns an empty dict when there is no science acquisition.""" - with mock.patch( - "imap_processing.idex.idex_l2b.get_science_acquisition_timestamps", - return_value=([], [], []), - ): - on_percentages = get_science_acquisition_on_percentage( - np.array([]), np.array([]) - ) + on_percentages = get_science_acquisition_on_percentage(np.array([]), np.array([])) assert not on_percentages assert "No science acquisition events found" in caplog.text