diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml new file mode 100644 index 00000000000..d0c3b59f68d --- /dev/null +++ b/.github/FUNDING.yml @@ -0,0 +1,15 @@ +# These are supported funding model platforms + +# github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] +# patreon: # Replace with a single Patreon username +open_collective: mne-python +# ko_fi: # Replace with a single Ko-fi username +# tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel +# community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry +# liberapay: # Replace with a single Liberapay username +# issuehunt: # Replace with a single IssueHunt username +# lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry +# polar: # Replace with a single Polar username +# buy_me_a_coffee: # Replace with a single Buy Me a Coffee username +# thanks_dev: # Replace with a single thanks.dev username +# custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] diff --git a/doc/changes/dev/12847.bugfix.rst b/doc/changes/dev/12847.bugfix.rst new file mode 100644 index 00000000000..26301853842 --- /dev/null +++ b/doc/changes/dev/12847.bugfix.rst @@ -0,0 +1 @@ +Handle scenario where an Eyelink recording switched from binocular to monocular mode during a trial by `Scott Huberty`_ \ No newline at end of file diff --git a/mne/io/eyelink/_utils.py b/mne/io/eyelink/_utils.py index 421b967d0a8..0bd650ac8d8 100644 --- a/mne/io/eyelink/_utils.py +++ b/mne/io/eyelink/_utils.py @@ -47,20 +47,25 @@ def _parse_eyelink_ascii( ): # ======================== Parse ASCII File ========================= raw_extras = dict() - raw_extras.update(_parse_recording_blocks(fname)) - raw_extras.update(_get_metadata(raw_extras)) raw_extras["dt"] = _get_recording_datetime(fname) - _validate_data(raw_extras) + data_blocks: list[dict] = _parse_recording_blocks(fname) + 
_validate_data(data_blocks) # ======================== Create DataFrames ======================== - raw_extras["dfs"] = _create_dataframes(raw_extras, apply_offsets) - del raw_extras["sample_lines"] # free up memory - # add column names to dataframes and set the dtype of each column - col_names, ch_names = _infer_col_names(raw_extras) - raw_extras["dfs"] = _assign_col_names(col_names, raw_extras["dfs"]) - raw_extras["dfs"] = _set_df_dtypes(raw_extras["dfs"]) # set dtypes for dataframes + # Process each block individually, then combine + processed_blocks = _create_dataframes(data_blocks, apply_offsets) + raw_extras["dfs"], ch_names = _combine_block_dataframes(processed_blocks) + del processed_blocks # free memory + for block in data_blocks: + del block["samples"] # remove samples from block to save memory + + first_block = data_blocks[0] + raw_extras["pos_unit"] = first_block["info"]["unit"] + raw_extras["sfreq"] = first_block["info"]["sfreq"] + raw_extras["first_timestamp"] = first_block["info"]["first_timestamp"] + raw_extras["n_blocks"] = len(data_blocks) # if HREF data, convert to radians - if "HREF" in raw_extras["rec_info"]: + if raw_extras["pos_unit"] == "HREF": raw_extras["dfs"]["samples"] = _convert_href_samples( raw_extras["dfs"]["samples"] ) @@ -75,7 +80,7 @@ def _parse_eyelink_ascii( ) # Convert timestamps to seconds for df in raw_extras["dfs"].values(): - df = _convert_times(df, raw_extras["first_samp"]) + df = _convert_times(df, raw_extras["first_timestamp"]) # Find overlaps between left and right eye events if find_overlaps: for key in raw_extras["dfs"]: @@ -117,59 +122,104 @@ def _parse_recording_blocks(fname): "BUTTON": [], "PUPIL": [], } + data_blocks = [] is_recording_block = False for line in file: if line.startswith("START"): # start of recording block is_recording_block = True + # Initialize container for new block data + current_block = { + "samples": [], + "events": { + "START": [], + "END": [], + "SAMPLES": [], + "EVENTS": [], + "ESACC": [], 
def _validate_data(data_blocks: list):
    """Check the incoming data for some known problems that can occur.

    Parameters
    ----------
    data_blocks : list of dict
        One dict per recording block; each block's ``"info"`` sub-dict holds
        the block's ``unit``, ``tracking_mode``, ``eye``, ``sfreq`` and
        ``pupil_unit`` (see ``_get_metadata``).

    Raises
    ------
    RuntimeError
        If the sampling frequency or the x/y coordinate unit changed between
        acquisition blocks (unsupported).
    """
    # Gather per-block metadata so we can both report the datatypes present
    # and detect changes between acquisition blocks.
    units = []
    pupil_units = []
    modes = []
    eyes = []
    sfreqs = []
    for block in data_blocks:
        units.append(block["info"]["unit"])
        modes.append(block["info"]["tracking_mode"])
        eyes.append(block["info"]["eye"])
        sfreqs.append(block["info"]["sfreq"])
        pupil_units.append(block["info"]["pupil_unit"])
    if "GAZE" in units:
        # BUG FIX: the concatenated message previously read "detected.Pass"
        # (missing separator between the two string literals).
        logger.info(
            "Pixel coordinate data detected. "
            "Pass `scalings=dict(eyegaze=1e3)` when using plot"
            " method to make traces more legible."
        )
    if "HREF" in units:
        logger.info("Head-referenced eye-angle (HREF) data detected.")
    elif "PUPIL" in units:
        warn("Raw eyegaze coordinates detected. Analyze with caution.")
    if "AREA" in pupil_units:
        logger.info("Pupil-size area detected.")
    elif "DIAMETER" in pupil_units:
        logger.info("Pupil-size diameter detected.")

    if len(set(modes)) > 1:
        warn(
            "This recording switched between monocular and binocular tracking. "
            f"In order of acquisition blocks, tracking modes were {modes}. Data "
            "for the missing eye during monocular tracking will be filled with NaN."
        )
    # Monocular tracking but switched between left/right eye
    elif len(set(eyes)) > 1:
        warn(
            "The eye being tracked changed during the recording. "
            f"In order of acquisition blocks, they were {eyes}. "
            "Missing data for each eye will be filled with NaN."
        )
    if len(set(sfreqs)) > 1:
        raise RuntimeError(
            "The sampling frequency changed during the recording. "
            f"In order of acquisition blocks, they were {sfreqs}. "
            "please notify MNE-Python developers"
        )  # pragma: no cover
    if len(set(units)) > 1:
        raise RuntimeError(
            "The unit of measurement for x/y coordinates changed during the recording. "
            f"In order of acquisition blocks, they were {units}. "
            "please notify MNE-Python developers"
        )  # pragma: no cover
def _get_metadata(data_block: dict):
    """Get tracking mode, sfreq, eye tracked, pupil metric, etc. for one data block."""
    events = data_block["events"]
    rec_info = events["SAMPLES"][0]
    meta_data = dict()
    meta_data["unit"] = rec_info[0]

    # NOTE(review): a recording without pupil data may have no PUPIL lines at
    # all, so fall back to None in that case — TODO confirm with such a file.
    pupil_lines = events["PUPIL"]
    meta_data["pupil_unit"] = pupil_lines[0][0] if pupil_lines else None

    # Binocular recordings list both eyes on the SAMPLES line.
    if "LEFT" in rec_info and "RIGHT" in rec_info:
        meta_data["tracking_mode"] = "binocular"
        meta_data["eye"] = "both"
    else:
        meta_data["tracking_mode"] = "monocular"
        meta_data["eye"] = rec_info[1].lower()
    meta_data["first_timestamp"] = float(events["START"][0][0])
    meta_data["last_timestamp"] = float(events["END"][0][0])
    meta_data["sfreq"] = _get_sfreq_from_ascii(rec_info)
    meta_data["rec_info"] = rec_info
    return meta_data
def _create_dataframes(data_blocks, apply_offsets):
    """Create and process pandas DataFrames for each recording block.

    Each block is parsed with its own column layout (which may differ across
    blocks, e.g. when tracking mode changes); the per-block results are
    returned as a list for later combination.
    """
    processed = []
    for idx, block in enumerate(data_blocks):
        # Raw dataframes for this block (no column names assigned yet)
        dfs = _create_dataframes_for_block(block, apply_offsets)

        # Column and channel names are block-specific (mono vs. binocular, etc.)
        cols, ch_names = _infer_col_names_for_block(block)

        # Name the columns, then coerce each column to its proper dtype
        dfs = _set_df_dtypes(_assign_col_names(cols, dfs))

        processed.append(
            dict(
                block_idx=idx,
                dfs=dfs,
                ch_names=ch_names,
                info=block["info"],
            )
        )
    return processed
- ) + # Changed this from info to debug level to avoid spamming the log + logger.debug(f"No {label} events found in block") - # make dataframe for experiment messages - if raw_extras["event_lines"]["MSG"]: + # make dataframe for experiment messages in this block + if block["events"]["MSG"]: msgs = [] - for token in raw_extras["event_lines"]["MSG"]: + for token in block["events"]["MSG"]: if apply_offsets and len(token) == 2: ts, msg = token offset = np.nan @@ -314,47 +400,20 @@ def _create_dataframes(raw_extras, apply_offsets): msgs.append([ts, offset, msg]) df_dict["messages"] = pd.DataFrame(msgs) - # make dataframe for recording block start, end times - i = 1 - blocks = list() - for bgn, end in zip( - raw_extras["event_lines"]["START"], raw_extras["event_lines"]["END"] - ): - blocks.append((float(bgn[0]), float(end[0]), i)) - i += 1 - cols = ["time", "end_time", "block"] - df_dict["recording_blocks"] = pd.DataFrame(blocks, columns=cols) - - # TODO: Make dataframes for other eyelink events (Buttons) + # TODO: Make dataframes for other eyelink events (Buttons) return df_dict -def _drop_status_col(samples_df): - """Drop STATUS column from samples dataframe. - - see https://github.com/mne-tools/mne-python/issues/11809, and section 4.9.2.1 of - the Eyelink 1000 Plus User Manual, version 1.0.19. We know that the STATUS - column is either 3, 5, 13, or 17 characters long, i.e. "...", ".....", ".C." - """ - status_cols = [] - # we know the first 3 columns will be the time, xpos, ypos - for col in samples_df.columns[3:]: - if samples_df[col][0][0].isnumeric(): - # if the value is numeric, it's not a status column - continue - if len(samples_df[col][0]) in [3, 5, 13, 17]: - status_cols.append(col) - return samples_df.drop(columns=status_cols) - - -def _infer_col_names(raw_extras): - """Build column and channel names for data from Eyelink ASCII file. 
+def _infer_col_names_for_block(block: dict) -> tuple[dict[str, list], list]: + """Build column and channel names for data from one Eyelink recording block. Returns the expected column names for the sample lines and event - lines, to be passed into pd.DataFrame. The columns present in an eyelink ASCII - file can vary. The order that col_names are built below should NOT change. + lines for a single recording block. The columns present can vary + between blocks if tracking mode changes. """ col_names = {} + block_info = block["info"] + # initiate the column names for the sample lines col_names["samples"] = list(EYELINK_COLS["timestamp"]) col_names["messages"] = list(EYELINK_COLS["messages"]) @@ -364,21 +423,24 @@ def _infer_col_names(raw_extras): col_names["fixations"] = list(EYELINK_COLS["eye_event"] + EYELINK_COLS["fixation"]) col_names["saccades"] = list(EYELINK_COLS["eye_event"] + EYELINK_COLS["saccade"]) - # Recording was either binocular or monocular - # If monocular, find out which eye was tracked and append to ch_name - if raw_extras["tracking_mode"] == "monocular": - eye = raw_extras["eye"] + # Get block-specific tracking info + tracking_mode = block_info["tracking_mode"] + eye = block_info["eye"] + rec_info = block["events"]["SAMPLES"][0] # SAMPLES line for this block + + # Recording was either binocular or monocular for this block + if tracking_mode == "monocular": ch_names = list(EYELINK_COLS["pos"][eye]) - elif raw_extras["tracking_mode"] == "binocular": + elif tracking_mode == "binocular": ch_names = list(EYELINK_COLS["pos"]["left"] + EYELINK_COLS["pos"]["right"]) col_names["samples"].extend(ch_names) # The order of these if statements should not be changed. 
def _combine_block_dataframes(processed_blocks: list[dict]):
    """Combine dataframes across acquisition blocks.

    Blocks may differ in which columns and event types they contain (e.g.
    binocular vs. monocular tracking, or switching between the left and right
    eye), so missing sample columns are padded with NaN before concatenation.
    """
    pd = _check_pandas_installed()

    # Build the union of channel names, sample columns, and dataframe types
    # across all acquisition blocks. Channel names must keep their first-seen
    # order (the tests rely on it), so a set cannot be used for them.
    all_ch_names = []
    sample_cols = set()
    df_types = set()
    for blk in processed_blocks:
        for name in blk["ch_names"]:
            if name not in all_ch_names:
                all_ch_names.append(name)
        if "samples" in blk["dfs"]:
            sample_cols.update(blk["dfs"]["samples"].columns)
        df_types.update(blk["dfs"].keys())

    # Sets have no stable order, so fix the sample-column order by sorting.
    sample_cols = sorted(sample_cols)

    combined = {}
    for df_type in df_types:
        pieces = []
        for blk in processed_blocks:
            if df_type not in blk["dfs"]:
                continue
            # Mutate the block's dataframe in place to conserve memory
            df = blk["dfs"][df_type]
            if df_type == "samples":
                # Pad columns this block lacks, then unify column order
                for col in sample_cols:
                    if col not in df.columns:
                        df[col] = np.nan
                df = df[sample_cols]
            pieces.append(df)
        if pieces:
            combined[df_type] = pd.concat(pieces, ignore_index=True)

    # Recording-block start/end times with a 1-indexed block number
    rows = [
        (blk["info"]["first_timestamp"], blk["info"]["last_timestamp"], i + 1)
        for i, blk in enumerate(processed_blocks)
    ]
    combined["recording_blocks"] = pd.DataFrame(
        rows, columns=["time", "end_time", "block"]
    )

    return combined, all_ch_names
+ # so we can't use a set like we do for the columns. + # bc it randomly orders the channel names. + for ch_name in block["ch_names"]: + if ch_name not in all_ch_names: + all_ch_names.append(ch_name) + if "samples" in block["dfs"]: + all_samples_cols.update(block["dfs"]["samples"].columns) + all_df_types.update(block["dfs"].keys()) + + # The sets randomly ordered the column names. + all_samples_cols = sorted(all_samples_cols) + + # Combine dataframes by type + combined_dfs = {} + + for df_type in all_df_types: + block_dfs = [] + + for block in processed_blocks: + if df_type in block["dfs"]: + # We will update the dfs in-place to conserve memory + block_df = block["dfs"][df_type] + + # For samples dataframes, ensure all have the same columns + if df_type == "samples": + for col in all_samples_cols: + if col not in block_df.columns: + block_df[col] = np.nan + + # Reorder columns + block_df = block_df[all_samples_cols] + + block_dfs.append(block_df) + + if block_dfs: + # Concatenate all blocks for this dataframe type + combined_dfs[df_type] = pd.concat(block_dfs, ignore_index=True) + + # Create recording blocks dataframe from block info + blocks_data = [] + for i, block in enumerate(processed_blocks): + start_time = block["info"]["first_timestamp"] + end_time = block["info"]["last_timestamp"] + blocks_data.append((start_time, end_time, i + 1)) + combined_dfs["recording_blocks"] = pd.DataFrame( + blocks_data, columns=["time", "end_time", "block"] + ) + + return combined_dfs, all_ch_names + + +def _drop_status_col(samples_df): + """Drop STATUS column from samples dataframe. + + see https://github.com/mne-tools/mne-python/issues/11809, and section 4.9.2.1 of + the Eyelink 1000 Plus User Manual, version 1.0.19. We know that the STATUS + column is either 3, 5, 13, or 17 characters long, i.e. "...", ".....", ".C." 
@requires_testing_data
@pytest.mark.parametrize("fname", [fname])
def test_bino_to_mono(tmp_path, fname):
    """Test a file that switched from binocular to monocular mid-recording.

    Appends a second, left-eye-only acquisition block to a binocular test
    file and checks that channels for both eyes are still created.
    """
    out_file = tmp_path / "tmp_eyelink.asc"
    in_file = Path(fname)

    lines = in_file.read_text("utf-8").splitlines()
    # We'll also add some binocular velocity data to increase our testing coverage.
    start_idx = [li for li, line in enumerate(lines) if line.startswith("START")][0]
    for li, line in enumerate(lines[start_idx:-2], start=start_idx):
        tokens = line.split("\t")
        event_type = tokens[0]
        if event_type == "SAMPLES":
            tokens.insert(3, "VEL")
            lines[li] = "\t".join(tokens)
        elif event_type.isnumeric():
            # fake velocity values for x/y left/right
            tokens[4:4] = ["999.1", "999.2", "999.3", "999.4"]
            lines[li] = "\t".join(tokens)
    end_line = lines[-2]
    end_ts = int(end_line.split("\t")[1])
    # Build a second recording block that contains only left-eye data
    second_block = []
    new_ts = end_ts + 1
    info = [
        "GAZE",
        "LEFT",
        "VEL",
        "RATE",
        "500.00",
        "TRACKING",
        "CR",
        "FILTER",
        "2",
    ]
    start = ["START", f"{new_ts}", "LEFT", "SAMPLES", "EVENTS"]
    pupil = ["PUPIL", "DIAMETER"]
    samples = ["SAMPLES"] + info
    events = ["EVENTS"] + info
    second_block.append("\t".join(start) + "\n")
    second_block.append("\t".join(pupil) + "\n")
    second_block.append("\t".join(samples) + "\n")
    second_block.append("\t".join(events) + "\n")
    # Some fake sample data: x, y, pupil, velocity x/y, status
    left = ["960", "540", "0.0", "999.1", "999.2", "..."]
    NUM_FAKE_SAMPLES = 4000
    for ii in range(NUM_FAKE_SAMPLES):
        ts = new_ts + ii
        tokens = [f"{ts}"] + left
        second_block.append("\t".join(tokens) + "\n")
    # interleave some events into the second block
    duration = 500
    blink_ts = new_ts + 500
    end_blink = ["EBLINK", "L", f"{blink_ts}", f"{blink_ts + 50}", "106"]
    fix_ts = new_ts + 1500
    end_fix = [
        "EFIX",
        "L",
        f"{fix_ts}",
        f"{fix_ts + duration}",
        "1616",
        "1025.1",
        "580.9",
        "1289",
    ]
    sacc_ts = new_ts + 2500
    end_sacc = [
        "ESACC",
        "L",
        f"{sacc_ts}",
        f"{sacc_ts + duration}",
        "52",
        "1029.6",
        "582.3",
        "581.7",
        "292.5",
        "10.30",
        "387",
    ]
    second_block.append("\t".join(end_blink) + "\n")
    second_block.append("\t".join(end_fix) + "\n")
    second_block.append("\t".join(end_sacc) + "\n")
    end_ts = ts + 1
    end_block = ["END", f"{end_ts}", "SAMPLES", "EVENTS", "RES", "45", "45"]
    second_block.append("\t".join(end_block))
    lines += second_block
    out_file.write_text("\n".join(lines), encoding="utf-8")

    with pytest.warns(
        RuntimeWarning, match="This recording switched between monocular and binocular"
    ):
        raw = read_raw_eyelink(out_file)
    # Channels for BOTH eyes must exist even though block 2 is monocular
    want_channels = [
        "xpos_left",
        "ypos_left",
        "pupil_left",
        "xpos_right",
        "ypos_right",
        "pupil_right",
        "xvel_left",
        "yvel_left",
        "xvel_right",
        "yvel_right",
    ]
    assert len(set(raw.info["ch_names"]).difference(set(want_channels))) == 0
+ The coordinates are in raw units (not pixels or radians). + It has some misc channels (head position, saccade velocity, etc.) + """ out_file = tmp_path / "tmp_eyelink.asc" _simulate_eye_tracking_data(fname, out_file) with ( _record_warnings(), pytest.warns(RuntimeWarning, match="Raw eyegaze coordinates"), + pytest.warns(RuntimeWarning, match="The eye being tracked changed"), ): raw = read_raw_eyelink(out_file, apply_offsets=True) @@ -274,6 +388,11 @@ def test_multi_block_misc_channels(fname, tmp_path): "x_head", "y_head", "distance", + "xpos_left", + "ypos_left", + "pupil_left", + "xvel_left", + "yvel_left", ] assert raw.ch_names == chs_in_file