Skip to content

Commit

Permalink
Merge pull request #20 from catalystneuro/add_visual_stimulus_interface
Browse files Browse the repository at this point in the history
Add visual stimulus interface
  • Loading branch information
alessandratrapani authored May 28, 2024
2 parents a5a1502 + 3cf37a3 commit 654352c
Show file tree
Hide file tree
Showing 5 changed files with 148 additions and 50 deletions.
1 change: 1 addition & 0 deletions src/higley_lab_to_nwb/lohani_2022/interfaces/__init__.py
Original file line number Diff line number Diff line change
@@ -1,2 +1,3 @@
from .lohani_2022_imaginginterface import Lohani2022MesoscopicImagingInterface
from .lohani_2022_spike2signals_interface import Lohani2022Spike2SignalsInterface
from .lohani_2022_visual_stimulus_interface import Lohani2022VisualStimulusInterface
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,6 @@ def __init__(
file_path: FilePathType,
ttl_stream_ids_to_names_map: dict,
behavioral_stream_ids_to_names_map: dict,
stimulus_stream_ids_to_names_map: dict = None,
verbose: bool = True,
):
"""
Expand All @@ -64,8 +63,6 @@ def __init__(
If there are several streams for ttl signals, specify the stream id and associated name.
behavioral_stream_ids_to_names_map: dict
If there are several streams for behavioural signals, specify the stream id and associated name.
stimulus_stream_ids_to_names_map: dict
If there are several streams for external stimuli, specify the stream id and associated name.
verbose : bool, default: True
"""
_test_sonpy_installation()
Expand All @@ -77,7 +74,6 @@ def __init__(

self.ttl_stream_ids_to_names_map = ttl_stream_ids_to_names_map
self.behavioral_stream_ids_to_names_map = behavioral_stream_ids_to_names_map
self.stimulus_stream_ids_to_names_map = stimulus_stream_ids_to_names_map

def get_metadata(self) -> dict:
metadata = super().get_metadata()
Expand Down Expand Up @@ -129,25 +125,6 @@ def add_to_nwbfile(self, nwbfile: NWBFile, metadata: dict, stub_test: bool = Fal
nwbfile.add_acquisition(ttl_types_table)
nwbfile.add_acquisition(ttls_table)

if self.stimulus_stream_ids_to_names_map is not None:

for stream_id, stream_name in self.stimulus_stream_ids_to_names_map.items():
intervals_table = TimeIntervals(
name=stream_name,
description=f"Intervals for each {stream_name}",
)
start_times = self.get_event_times_from_ttl(stream_id=stream_id)
stop_times = self.get_event_times_from_ttl(stream_id=stream_id,rising=False)

if len(start_times):
for start,stop in zip(start_times[:end_frame], stop_times[:end_frame]):
intervals_table.add_row(
start_time=start,
stop_time=stop,
)

nwbfile.add_time_intervals(intervals_table)

for stream_id, stream_name in self.behavioral_stream_ids_to_names_map.items():
extractor = CedRecordingExtractor(file_path=str(self.source_data["file_path"]), stream_id=stream_id)
gain, offset = _get_stream_gain_offset(file_path=str(self.source_data["file_path"]), stream_id=stream_id)
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,122 @@
from typing import List
import pandas as pd
from neo import io

from neuroconv import BaseTemporalAlignmentInterface
from neuroconv.tools import get_package
from neuroconv.utils import FilePathType
from neuroconv.tools.signal_processing import get_rising_frames_from_ttl, get_falling_frames_from_ttl
from spikeinterface.extractors import CedRecordingExtractor
from pynwb import NWBFile
from pynwb.epoch import TimeIntervals


def _test_sonpy_installation() -> None:
    """Raise an informative error if ``sonpy`` cannot be used on this platform/Python version."""
    # sonpy wheels are unavailable for these interpreter versions (and for
    # several versions on Apple Silicon), so fail early with a clear message.
    excluded_darwin_versions = dict(arm=["3.8", "3.9", "3.10", "3.11"])
    get_package(
        package_name="sonpy",
        excluded_python_versions=["3.10", "3.11"],
        excluded_platforms_and_python_versions=dict(darwin=excluded_darwin_versions),
    )


def get_streams(file_path: FilePathType) -> tuple:
    """Return the stream ids and stream names of the signal channels in a CED file.

    Parameters
    ----------
    file_path : FilePathType
        Path to the .smr or .smrx file.

    Returns
    -------
    tuple
        Two parallel arrays: the signal-channel ids and the signal-channel names,
        as read from the CED file header.
    """
    # Fix: the function returns a (ids, names) tuple, not a single list of
    # names — the previous -> List[str] annotation and docstring were wrong.
    reader = io.CedIO(filename=file_path)
    signal_channels = reader.header["signal_channels"]
    stream_ids = signal_channels["id"]
    stream_names = signal_channels["name"]
    return stream_ids, stream_names


class Lohani2022VisualStimulusInterface(BaseTemporalAlignmentInterface):
    """
    Data interface class for converting Spike2 visual stimulus signals from CED (Cambridge Electronic
    Design) using the :py:class:`~spikeinterface.extractors.CedRecordingExtractor`."""

    display_name = "Spike2 Recording"
    associated_suffixes = (".smr", ".smrx")
    info = "Interface for Spike2 analogue signals from CED (Cambridge Electronic Design)."

    def __init__(
        self,
        spike2_file_path: FilePathType,
        csv_file_path: FilePathType,
        stream_id: str,
        verbose: bool = True,
    ):
        """
        Parameters
        ----------
        spike2_file_path : FilePathType
            Path to .smr or .smrx file.
        csv_file_path : FilePathType
            Path to .csv file for visual stimulus characterization.
        stream_id : str
            Id of the Spike2 stream carrying the visual stimulus TTL signal.
        verbose : bool, default: True
        """
        _test_sonpy_installation()

        super().__init__(
            spike2_file_path=spike2_file_path,
            csv_file_path=csv_file_path,
            stream_id=stream_id,
            verbose=verbose,
        )

    def get_event_times_from_ttl(self, rising: bool = True):
        """Return the timestamps (in seconds) of TTL edges on the configured stream.

        Parameters
        ----------
        rising : bool, default: True
            If True, return rising-edge times (stimulus onsets); otherwise return
            falling-edge times (stimulus offsets).
        """
        extractor = CedRecordingExtractor(
            file_path=str(self.source_data["spike2_file_path"]), stream_id=self.source_data["stream_id"]
        )
        times = extractor.get_times()
        traces = extractor.get_traces()
        # The helpers return sample indices into the trace; map them to timestamps.
        if rising:
            event_frames = get_rising_frames_from_ttl(traces)
        else:
            event_frames = get_falling_frames_from_ttl(traces)

        return times[event_frames]

    def get_stimulus_feature(self, column_index):
        """Read the given column(s) of the stimulus characterization .csv as a numpy array."""
        feature = pd.read_csv(self.source_data["csv_file_path"], usecols=column_index)
        return feature.to_numpy()

    def add_to_nwbfile(self, nwbfile: NWBFile, metadata: dict, stub_test: bool = False) -> None:
        """Add a "VisualStimulus" TimeIntervals table with one row per stimulus presentation.

        Parameters
        ----------
        nwbfile : NWBFile
            In-memory NWB file the intervals table is added to.
        metadata : dict
            Conversion metadata (unused here; kept for interface compatibility).
        stub_test : bool, default: False
            If True, write at most the first 100 presentations.
        """
        intervals_table = TimeIntervals(
            name="VisualStimulus",
            description="Intervals for each visual stimulus presentation",
        )

        # Column layout of the characterization csv is assumed to be:
        # contrast, orientation, temporal freq., spatial freq., size, screen coords (4 cols)
        # — TODO confirm against the acquisition pipeline.
        intervals_table.add_column(name="contrast", description="Contrast of the visual stimulus image.")
        contrasts = self.get_stimulus_feature(column_index=[0])
        intervals_table.add_column(name="orientation", description="Orientation of the visual stimulus image, in degree.")
        orientations = self.get_stimulus_feature(column_index=[1])
        intervals_table.add_column(name="stimulus_frequency", description="Temporal frequency of the stimulus, in Hz.")
        stimulus_frequencies = self.get_stimulus_feature(column_index=[2])
        intervals_table.add_column(
            name="spatial_frequency", description="Spatial frequency of the stimulus, in cycles per degrees."
        )
        spatial_frequencies = self.get_stimulus_feature(column_index=[3])
        intervals_table.add_column(name="stimulus_size", description="Size of the visual stimulus, in degrees.")
        sizes = self.get_stimulus_feature(column_index=[4])
        # TODO add a more descriptive text as description for "screen_coordinates" column
        intervals_table.add_column(name="screen_coordinates", description="Visual stimulus coordinates on the screen.")
        screen_coordinates = self.get_stimulus_feature(column_index=[5, 6, 7, 8])

        start_times = self.get_event_times_from_ttl()
        stop_times = self.get_event_times_from_ttl(rising=False)

        # Guard against an unmatched final rising edge (recording ends mid-stimulus):
        # only keep presentations that have both an onset and an offset.
        n_events = min(len(start_times), len(stop_times))
        # Fix: previously stub_test unconditionally used 100 frames, which raised
        # IndexError for sessions with fewer than 100 presentations.
        n_frames = min(100, n_events) if stub_test else n_events

        for frame in range(n_frames):
            intervals_table.add_row(
                start_time=start_times[frame],
                stop_time=stop_times[frame],
                contrast=contrasts[frame][0],
                orientation=orientations[frame][0],
                stimulus_frequency=stimulus_frequencies[frame][0],
                spatial_frequency=spatial_frequencies[frame][0],
                stimulus_size=sizes[frame][0],
                screen_coordinates=screen_coordinates[frame][:],
            )

        nwbfile.add_time_intervals(intervals_table)
40 changes: 18 additions & 22 deletions src/higley_lab_to_nwb/lohani_2022/lohani_2022_convert_session.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,8 +27,7 @@ def session_to_nwb(
search_pattern = "_".join(session_id.split("_")[:2])

# Add Analog signals from Spike2
smrx_files = glob.glob(os.path.join(folder_path, f"{search_pattern}*.smrx"))
file_path = smrx_files[0]
file_path = glob.glob(os.path.join(folder_path, f"{search_pattern}*.smrx"))[0]
stream_ids, stream_names = get_streams(file_path=file_path)

# Define each smrx signal name
Expand All @@ -43,33 +42,30 @@ def session_to_nwb(
behavioral_name_map = {
stream_ids[stream_names == "wheel"][0]: "WheelSignal",
}
stimulus_name_map = {
stream_ids[stream_names == "Vis"][0]: "VisualStimulus",
# stream_ids[stream_names == "airpuff"][0]: "AirpuffStimulus",
}
if "vis_stim" in session_id:
source_data.update(
dict(
Spike2Signals=dict(
file_path=file_path,
ttl_stream_ids_to_names_map=TTLsignals_name_map,
behavioral_stream_ids_to_names_map=behavioral_name_map,
stimulus_stream_ids_to_names_map=stimulus_name_map,
)

source_data.update(
dict(
Spike2Signals=dict(
file_path=file_path,
ttl_stream_ids_to_names_map=TTLsignals_name_map,
behavioral_stream_ids_to_names_map=behavioral_name_map,
)
)
else:
)
conversion_options.update(dict(Spike2Signals=dict(stub_test=stub_test)))

if "vis_stim" in session_id:
csv_file_path = glob.glob(os.path.join(folder_path, f"{search_pattern}*.csv"))[0]
source_data.update(
dict(
Spike2Signals=dict(
file_path=file_path,
ttl_stream_ids_to_names_map=TTLsignals_name_map,
behavioral_stream_ids_to_names_map=behavioral_name_map,
VisualStimulusInterface=dict(
spike2_file_path=file_path,
csv_file_path=csv_file_path,
stream_id=stream_ids[stream_names == "Vis"][0],
)
)
)

conversion_options.update(dict(Spike2Signals=dict(stub_test=stub_test)))
conversion_options.update(dict(VisualStimulusInterface=dict(stub_test=stub_test)))

# Add Imaging
sampling_frequency = 10.0
Expand Down
12 changes: 7 additions & 5 deletions src/higley_lab_to_nwb/lohani_2022/lohani_2022nwbconverter.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,12 @@
"""Primary NWBConverter class for this dataset."""

from typing import Dict, List, Optional, Tuple, Union
from typing import Dict, List
from neuroconv import NWBConverter
from higley_lab_to_nwb.lohani_2022.interfaces.lohani_2022_spike2signals_interface import (
from higley_lab_to_nwb.lohani_2022.interfaces import (
Lohani2022MesoscopicImagingInterface,
Lohani2022Spike2SignalsInterface,
Lohani2022VisualStimulusInterface,
)
from higley_lab_to_nwb.lohani_2022.interfaces.lohani_2022_imaginginterface import Lohani2022MesoscopicImagingInterface
from neuroconv.datainterfaces import VideoInterface, FacemapInterface


Expand All @@ -16,6 +17,7 @@ class Lohani2022NWBConverter(NWBConverter):
Spike2Signals=Lohani2022Spike2SignalsInterface,
Video=VideoInterface,
FacemapInterface=FacemapInterface,
VisualStimulusInterface=Lohani2022VisualStimulusInterface,
)

def __init__(
Expand All @@ -31,8 +33,8 @@ def __init__(
for channel in channels:
suffix = f"{excitation_type}Excitation{channel}Channel"
interface_name = f"Imaging{suffix}"
self.data_interface_classes[interface_name]=Lohani2022MesoscopicImagingInterface
self.data_interface_classes[interface_name] = Lohani2022MesoscopicImagingInterface

self.verbose = verbose
self._validate_source_data(source_data=source_data, verbose=self.verbose)

Expand Down

0 comments on commit 654352c

Please sign in to comment.